From 580a179bbe4a06f1c66059a95b03a838f132aee6 Mon Sep 17 00:00:00 2001 From: ArunTamil21 Date: Thu, 26 Feb 2026 00:42:04 +0000 Subject: [PATCH 01/64] Add missing runtime tests for alias intrinsics: _mm_cvt_ss2si, _mm_cvtt_ss2si, _mm_cvt_si2ss, _mm_set_ps1 --- .../stdarch/crates/core_arch/src/x86/sse.rs | 71 +++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/library/stdarch/crates/core_arch/src/x86/sse.rs b/library/stdarch/crates/core_arch/src/x86/sse.rs index 3f7781cc7dc4c..0ec842f9fc7c6 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse.rs @@ -3013,6 +3013,22 @@ mod tests { } } + #[simd_test(enable = "sse")] + fn test_mm_cvt_ss2si() { + let inputs = &[42.0f32, -3.1, 4.0e10, 4.0e-20, NAN, 2147483500.1]; + let result = &[42i32, -3, i32::MIN, 0, i32::MIN, 2147483520]; + for i in 0..inputs.len() { + let x = _mm_setr_ps(inputs[i], 1.0, 3.0, 4.0); + let e = result[i]; + let r = _mm_cvt_ss2si(x); + assert_eq!( + e, r, + "TestCase #{} _mm_cvt_ss2si({:?}) = {}, expected: {}", + i, x, r, e + ); + } + } + #[simd_test(enable = "sse")] fn test_mm_cvttss_si32() { let inputs = &[ @@ -3038,6 +3054,31 @@ mod tests { } } + #[simd_test(enable = "sse")] + fn test_mm_cvtt_ss2si() { + let inputs = &[ + (42.0f32, 42i32), + (-31.4, -31), + (-33.5, -33), + (-34.5, -34), + (10.999, 10), + (-5.99, -5), + (4.0e10, i32::MIN), + (4.0e-10, 0), + (NAN, i32::MIN), + (2147483500.1, 2147483520), + ]; + for (i, &(xi, e)) in inputs.iter().enumerate() { + let x = _mm_setr_ps(xi, 1.0, 3.0, 4.0); + let r = _mm_cvtt_ss2si(x); + assert_eq!( + e, r, + "TestCase #{} _mm_cvtt_ss2si({:?}) = {}, expected: {}", + i, x, r, e + ); + } + } + #[simd_test(enable = "sse")] const fn test_mm_cvtsi32_ss() { let a = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); @@ -3059,6 +3100,27 @@ mod tests { assert_eq_m128(e, r); } + #[simd_test(enable = "sse")] + fn test_mm_cvt_si2ss() { + let a = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); + + let r = _mm_cvt_si2ss(a, 4555); + let 
e = _mm_setr_ps(4555.0, 6.0, 7.0, 8.0); + assert_eq_m128(e, r); + + let r = _mm_cvt_si2ss(a, 322223333); + let e = _mm_setr_ps(322223333.0, 6.0, 7.0, 8.0); + assert_eq_m128(e, r); + + let r = _mm_cvt_si2ss(a, -432); + let e = _mm_setr_ps(-432.0, 6.0, 7.0, 8.0); + assert_eq_m128(e, r); + + let r = _mm_cvt_si2ss(a, -322223333); + let e = _mm_setr_ps(-322223333.0, 6.0, 7.0, 8.0); + assert_eq_m128(e, r); + } + #[simd_test(enable = "sse")] const fn test_mm_cvtss_f32() { let a = _mm_setr_ps(312.0134, 5.0, 6.0, 7.0); @@ -3085,6 +3147,15 @@ mod tests { assert_eq!(get_m128(r2, 3), 4.25); } + #[simd_test(enable = "sse")] + const fn test_mm_set_ps1() { + let r = _mm_set_ps1(black_box(4.25)); + assert_eq!(get_m128(r, 0), 4.25); + assert_eq!(get_m128(r, 1), 4.25); + assert_eq!(get_m128(r, 2), 4.25); + assert_eq!(get_m128(r, 3), 4.25); + } + #[simd_test(enable = "sse")] const fn test_mm_set_ps() { let r = _mm_set_ps( From 63fb6c3c59898ada456b098aad6dcd9fb76807f3 Mon Sep 17 00:00:00 2001 From: ArunTamil21 Date: Thu, 26 Feb 2026 16:18:30 +0000 Subject: [PATCH 02/64] Refactor alias tests using meta function pattern and add missing tests for _mm_undefined_ps, _mm_prefetch, _mm_load_ps1, _mm_store_ps1 --- .../stdarch/crates/core_arch/src/x86/sse.rs | 216 ++++++++---------- .../crates/stdarch-verify/tests/x86-intel.rs | 7 +- 2 files changed, 94 insertions(+), 129 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/x86/sse.rs b/library/stdarch/crates/core_arch/src/x86/sse.rs index 0ec842f9fc7c6..4e9a3a3cb1746 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse.rs @@ -2997,40 +2997,28 @@ mod tests { } } - #[simd_test(enable = "sse")] - fn test_mm_cvtss_si32() { + fn test_mm_cvtss_si32_impl(f: fn(__m128) -> i32) { let inputs = &[42.0f32, -3.1, 4.0e10, 4.0e-20, NAN, 2147483500.1]; let result = &[42i32, -3, i32::MIN, 0, i32::MIN, 2147483520]; for i in 0..inputs.len() { - let x = _mm_setr_ps(inputs[i], 1.0, 3.0, 4.0); + 
let x = unsafe { _mm_setr_ps(inputs[i], 1.0, 3.0, 4.0) }; let e = result[i]; - let r = _mm_cvtss_si32(x); - assert_eq!( - e, r, - "TestCase #{} _mm_cvtss_si32({:?}) = {}, expected: {}", - i, x, r, e - ); + let r = f(x); + assert_eq!(e, r, "TestCase #{} f({:?}) = {}, expected: {}", i, x, r, e); } } #[simd_test(enable = "sse")] - fn test_mm_cvt_ss2si() { - let inputs = &[42.0f32, -3.1, 4.0e10, 4.0e-20, NAN, 2147483500.1]; - let result = &[42i32, -3, i32::MIN, 0, i32::MIN, 2147483520]; - for i in 0..inputs.len() { - let x = _mm_setr_ps(inputs[i], 1.0, 3.0, 4.0); - let e = result[i]; - let r = _mm_cvt_ss2si(x); - assert_eq!( - e, r, - "TestCase #{} _mm_cvt_ss2si({:?}) = {}, expected: {}", - i, x, r, e - ); - } + fn test_mm_cvtss_si32() { + test_mm_cvtss_si32_impl(_mm_cvtss_si32); } #[simd_test(enable = "sse")] - fn test_mm_cvttss_si32() { + fn test_mm_cvt_ss2si() { + test_mm_cvtss_si32_impl(_mm_cvt_ss2si); + } + + fn test_cvttss_si32_impl(f: fn(__m128) -> i32) { let inputs = &[ (42.0f32, 42i32), (-31.4, -31), @@ -3044,81 +3032,48 @@ mod tests { (2147483500.1, 2147483520), ]; for (i, &(xi, e)) in inputs.iter().enumerate() { - let x = _mm_setr_ps(xi, 1.0, 3.0, 4.0); - let r = _mm_cvttss_si32(x); - assert_eq!( - e, r, - "TestCase #{} _mm_cvttss_si32({:?}) = {}, expected: {}", - i, x, r, e - ); + let x = unsafe { _mm_setr_ps(xi, 1.0, 3.0, 4.0) }; + let r = f(x); + assert_eq!(e, r, "TestCase #{} f({:?}) = {}, expected: {}", i, x, r, e); } } #[simd_test(enable = "sse")] - fn test_mm_cvtt_ss2si() { - let inputs = &[ - (42.0f32, 42i32), - (-31.4, -31), - (-33.5, -33), - (-34.5, -34), - (10.999, 10), - (-5.99, -5), - (4.0e10, i32::MIN), - (4.0e-10, 0), - (NAN, i32::MIN), - (2147483500.1, 2147483520), - ]; - for (i, &(xi, e)) in inputs.iter().enumerate() { - let x = _mm_setr_ps(xi, 1.0, 3.0, 4.0); - let r = _mm_cvtt_ss2si(x); - assert_eq!( - e, r, - "TestCase #{} _mm_cvtt_ss2si({:?}) = {}, expected: {}", - i, x, r, e - ); - } + fn test_mm_cvttss_si32() { + 
test_cvttss_si32_impl(_mm_cvttss_si32); } #[simd_test(enable = "sse")] - const fn test_mm_cvtsi32_ss() { - let a = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); + fn test_mm_cvtt_ss2si() { + test_cvttss_si32_impl(_mm_cvtt_ss2si) + } - let r = _mm_cvtsi32_ss(a, 4555); - let e = _mm_setr_ps(4555.0, 6.0, 7.0, 8.0); - assert_eq_m128(e, r); + fn test_mm_cvtsi32_ss_impl(f: fn(__m128, i32) -> __m128) { + unsafe { + let a = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); - let r = _mm_cvtsi32_ss(a, 322223333); - let e = _mm_setr_ps(322223333.0, 6.0, 7.0, 8.0); - assert_eq_m128(e, r); + let r = f(a, 4555); + assert_eq_m128(_mm_setr_ps(4555.0, 6.0, 7.0, 8.0), r); - let r = _mm_cvtsi32_ss(a, -432); - let e = _mm_setr_ps(-432.0, 6.0, 7.0, 8.0); - assert_eq_m128(e, r); + let r = f(a, 322223333); + assert_eq_m128(_mm_setr_ps(322223333.0, 6.0, 7.0, 8.0), r); - let r = _mm_cvtsi32_ss(a, -322223333); - let e = _mm_setr_ps(-322223333.0, 6.0, 7.0, 8.0); - assert_eq_m128(e, r); + let r = f(a, -432); + assert_eq_m128(_mm_setr_ps(-432.0, 6.0, 7.0, 8.0), r); + + let r = f(a, -322223333); + assert_eq_m128(_mm_setr_ps(-322223333.0, 6.0, 7.0, 8.0), r); + } } #[simd_test(enable = "sse")] - fn test_mm_cvt_si2ss() { - let a = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); - - let r = _mm_cvt_si2ss(a, 4555); - let e = _mm_setr_ps(4555.0, 6.0, 7.0, 8.0); - assert_eq_m128(e, r); - - let r = _mm_cvt_si2ss(a, 322223333); - let e = _mm_setr_ps(322223333.0, 6.0, 7.0, 8.0); - assert_eq_m128(e, r); - - let r = _mm_cvt_si2ss(a, -432); - let e = _mm_setr_ps(-432.0, 6.0, 7.0, 8.0); - assert_eq_m128(e, r); + fn test_mm_cvtsi32_ss() { + test_mm_cvtsi32_ss_impl(_mm_cvtsi32_ss); + } - let r = _mm_cvt_si2ss(a, -322223333); - let e = _mm_setr_ps(-322223333.0, 6.0, 7.0, 8.0); - assert_eq_m128(e, r); + #[simd_test(enable = "sse")] + fn test_mm_cvt_si2ss() { + test_mm_cvtsi32_ss_impl(_mm_cvt_si2ss); } #[simd_test(enable = "sse")] @@ -3133,27 +3088,25 @@ mod tests { assert_eq_m128(r, _mm_setr_ps(4.25, 0.0, 0.0, 0.0)); } + fn test_mm_set1_ps_impl(f: fn(f32) 
-> __m128) { + unsafe { + let r = f(black_box(4.25)); + assert_eq!(get_m128(r, 0), 4.25); + assert_eq!(get_m128(r, 1), 4.25); + assert_eq!(get_m128(r, 2), 4.25); + assert_eq!(get_m128(r, 3), 4.25); + } + } + #[simd_test(enable = "sse")] - const fn test_mm_set1_ps() { - let r1 = _mm_set1_ps(black_box(4.25)); - let r2 = _mm_set_ps1(black_box(4.25)); - assert_eq!(get_m128(r1, 0), 4.25); - assert_eq!(get_m128(r1, 1), 4.25); - assert_eq!(get_m128(r1, 2), 4.25); - assert_eq!(get_m128(r1, 3), 4.25); - assert_eq!(get_m128(r2, 0), 4.25); - assert_eq!(get_m128(r2, 1), 4.25); - assert_eq!(get_m128(r2, 2), 4.25); - assert_eq!(get_m128(r2, 3), 4.25); + fn test_mm_set1_ps() { + test_mm_set1_ps_impl(_mm_set1_ps); + test_mm_set1_ps_impl(_mm_set_ps1); } #[simd_test(enable = "sse")] - const fn test_mm_set_ps1() { - let r = _mm_set_ps1(black_box(4.25)); - assert_eq!(get_m128(r, 0), 4.25); - assert_eq!(get_m128(r, 1), 4.25); - assert_eq!(get_m128(r, 2), 4.25); - assert_eq!(get_m128(r, 3), 4.25); + fn test_mm_set_ps1() { + test_mm_set1_ps_impl(_mm_set_ps1); } #[simd_test(enable = "sse")] @@ -3242,11 +3195,20 @@ mod tests { assert_eq_m128(r, _mm_setr_ps(42.0, 0.0, 0.0, 0.0)); } - #[simd_test(enable = "sse")] - const fn test_mm_load1_ps() { + fn test_mm_load1_ps_impl(f: unsafe fn(*const f32) -> __m128) { let a = 42.0f32; - let r = unsafe { _mm_load1_ps(ptr::addr_of!(a)) }; - assert_eq_m128(r, _mm_setr_ps(42.0, 42.0, 42.0, 42.0)); + let r = unsafe { f(ptr::addr_of!(a)) }; + unsafe { assert_eq_m128(r, _mm_setr_ps(42.0, 42.0, 42.0, 42.0)) }; + } + + #[simd_test(enable = "sse")] + fn test_mm_load1_ps() { + test_mm_load1_ps_impl(_mm_load1_ps); + } + + #[simd_test(enable = "sse")] + fn test_mm_load_ps1() { + test_mm_load1_ps_impl(_mm_load_ps1); } #[simd_test(enable = "sse")] @@ -3298,34 +3260,24 @@ mod tests { assert_eq!(vals[2], 0.0); } - #[simd_test(enable = "sse")] - const fn test_mm_store1_ps() { + fn test_mm_store1_ps_impl(f: unsafe fn(*mut f32, __m128)) { let mut vals = Memory { data: 
[0.0f32; 4] }; - let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); - - // guaranteed to be aligned to 16 bytes + let a = unsafe { _mm_setr_ps(1.0, 2.0, 3.0, 4.0) }; let p = vals.data.as_mut_ptr(); - unsafe { - _mm_store1_ps(p, *black_box(&a)); + f(p, *black_box(&a)); } - assert_eq!(vals.data, [1.0, 1.0, 1.0, 1.0]); } #[simd_test(enable = "sse")] - const fn test_mm_store_ps() { - let mut vals = Memory { data: [0.0f32; 4] }; - let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); - - // guaranteed to be aligned to 16 bytes - let p = vals.data.as_mut_ptr(); - - unsafe { - _mm_store_ps(p, *black_box(&a)); - } + fn test_mm_store1_ps() { + test_mm_store1_ps_impl(_mm_store1_ps); + } - assert_eq!(vals.data, [1.0, 2.0, 3.0, 4.0]); + #[simd_test(enable = "sse")] + fn test_mm_store_ps1() { + test_mm_store1_ps_impl(_mm_store_ps1); } #[simd_test(enable = "sse")] @@ -3364,6 +3316,24 @@ mod tests { assert_eq!(vals.data, [0.0, 1.0, 2.0, 3.0, 4.0, 0.0, 0.0, 0.0]); } + #[simd_test(enable = "sse")] + fn test_mm_undefined_ps() { + // _mm_undefined_ps returns a vector with indeterminate elements, + // so we can only verify it doesn't crash. + let _r = _mm_undefined_ps(); + } + + #[simd_test(enable = "sse")] + fn test_mm_prefetch() { + // Prefetch only affects cache behavior, not program correctness, + // so we can only verify it doesn't crash for each hint strategy. 
+ let data = 42.0f32; + _mm_prefetch::<_MM_HINT_T0>(ptr::addr_of!(data) as *const i8); + _mm_prefetch::<_MM_HINT_T1>(ptr::addr_of!(data) as *const i8); + _mm_prefetch::<_MM_HINT_T2>(ptr::addr_of!(data) as *const i8); + _mm_prefetch::<_MM_HINT_NTA>(ptr::addr_of!(data) as *const i8); + } + #[simd_test(enable = "sse")] const fn test_mm_move_ss() { let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); diff --git a/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs b/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs index 2ac05e28cb4ce..85f718038e8d3 100644 --- a/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs +++ b/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs @@ -209,12 +209,9 @@ fn verify_all_signatures() { "_rdseed16_step", "_rdseed32_step", "_rdseed64_step", - // Prefetch - "_mm_prefetch", // CMPXCHG "cmpxchg16b", - // Undefined - "_mm_undefined_ps", + // Undefined, "_mm_undefined_pd", "_mm_undefined_si128", "_mm_undefined_ph", @@ -250,8 +247,6 @@ fn verify_all_signatures() { "_mm_cvtt_ss2si", "_mm_cvt_si2ss", "_mm_set_ps1", - "_mm_load_ps1", - "_mm_store_ps1", "_mm_bslli_si128", "_mm_bsrli_si128", "_bextr2_u32", From 455b21ba99ed10bb569c011250148607a7eb1c02 Mon Sep 17 00:00:00 2001 From: ArunTamil21 Date: Thu, 26 Feb 2026 16:27:41 +0000 Subject: [PATCH 03/64] Restore deleted test_mm_store_ps --- library/stdarch/crates/core_arch/src/x86/sse.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/library/stdarch/crates/core_arch/src/x86/sse.rs b/library/stdarch/crates/core_arch/src/x86/sse.rs index 4e9a3a3cb1746..6a857f22b0c94 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse.rs @@ -3275,6 +3275,21 @@ mod tests { test_mm_store1_ps_impl(_mm_store1_ps); } + #[simd_test(enable = "sse")] + const fn test_mm_store_ps() { + let mut vals = Memory { data: [0.0f32; 4] }; + let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + + // guaranteed to be aligned to 16 bytes + let p = 
vals.data.as_mut_ptr(); + + unsafe { + _mm_store_ps(p, *black_box(&a)); + } + + assert_eq!(vals.data, [1.0, 2.0, 3.0, 4.0]); + } + #[simd_test(enable = "sse")] fn test_mm_store_ps1() { test_mm_store1_ps_impl(_mm_store_ps1); From 952302abacf629a6fc8b57ede7a97d9eae9b3cdd Mon Sep 17 00:00:00 2001 From: ArunTamil21 Date: Fri, 27 Feb 2026 18:28:23 +0000 Subject: [PATCH 04/64] Remove redundant tests for _mm_prefetch and _mm_undefined_ps Already verified by assert_instr; no output to assert at runtime. --- .../stdarch/crates/core_arch/src/x86/sse.rs | 18 ------------------ .../crates/stdarch-verify/tests/x86-intel.rs | 3 +++ 2 files changed, 3 insertions(+), 18 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/x86/sse.rs b/library/stdarch/crates/core_arch/src/x86/sse.rs index 6a857f22b0c94..95666b5c3d730 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse.rs @@ -3331,24 +3331,6 @@ mod tests { assert_eq!(vals.data, [0.0, 1.0, 2.0, 3.0, 4.0, 0.0, 0.0, 0.0]); } - #[simd_test(enable = "sse")] - fn test_mm_undefined_ps() { - // _mm_undefined_ps returns a vector with indeterminate elements, - // so we can only verify it doesn't crash. - let _r = _mm_undefined_ps(); - } - - #[simd_test(enable = "sse")] - fn test_mm_prefetch() { - // Prefetch only affects cache behavior, not program correctness, - // so we can only verify it doesn't crash for each hint strategy. 
- let data = 42.0f32; - _mm_prefetch::<_MM_HINT_T0>(ptr::addr_of!(data) as *const i8); - _mm_prefetch::<_MM_HINT_T1>(ptr::addr_of!(data) as *const i8); - _mm_prefetch::<_MM_HINT_T2>(ptr::addr_of!(data) as *const i8); - _mm_prefetch::<_MM_HINT_NTA>(ptr::addr_of!(data) as *const i8); - } - #[simd_test(enable = "sse")] const fn test_mm_move_ss() { let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); diff --git a/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs b/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs index 85f718038e8d3..0ee32c826bb11 100644 --- a/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs +++ b/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs @@ -211,7 +211,10 @@ fn verify_all_signatures() { "_rdseed64_step", // CMPXCHG "cmpxchg16b", + //PREFETCH + "_mm_prefetch", // Undefined, + "_mm_undefined_ps", "_mm_undefined_pd", "_mm_undefined_si128", "_mm_undefined_ph", From 58e5c0e8795c002b92c218a527ea07e0307c0466 Mon Sep 17 00:00:00 2001 From: ArunTamil21 Date: Thu, 12 Mar 2026 13:01:57 +0000 Subject: [PATCH 05/64] Refactor alias tests using macros instead of meta functions --- .../stdarch/crates/core_arch/src/x86/sse.rs | 192 ++++++++---------- 1 file changed, 84 insertions(+), 108 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/x86/sse.rs b/library/stdarch/crates/core_arch/src/x86/sse.rs index 95666b5c3d730..93b2816869620 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse.rs @@ -2997,84 +2997,73 @@ mod tests { } } - fn test_mm_cvtss_si32_impl(f: fn(__m128) -> i32) { - let inputs = &[42.0f32, -3.1, 4.0e10, 4.0e-20, NAN, 2147483500.1]; - let result = &[42i32, -3, i32::MIN, 0, i32::MIN, 2147483520]; - for i in 0..inputs.len() { - let x = unsafe { _mm_setr_ps(inputs[i], 1.0, 3.0, 4.0) }; - let e = result[i]; - let r = f(x); - assert_eq!(e, r, "TestCase #{} f({:?}) = {}, expected: {}", i, x, r, e); + macro_rules! 
test_mm_cvtss_si32 { + ($($test_name:ident : $alias:ident),*) => {$( + #[simd_test(enable = "sse")] + unsafe fn $test_name() { + let inputs = &[42.0f32, -3.1, 4.0e10, 4.0e-20, NAN, 2147483500.1]; + let result = &[42i32, -3, i32::MIN, 0, i32::MIN, 2147483520]; + for i in 0..inputs.len() { + let x = _mm_setr_ps(inputs[i], 1.0, 3.0, 4.0); + let e = result[i]; + let r = $alias(x); + assert_eq!(e, r, "TestCase #{} f({:?}) = {}, expected: {}", i, x, r, e); + } + } + )*} + } + + test_mm_cvtss_si32!(test_mm_cvtss_si32: _mm_cvtss_si32, test_mm_cvt_ss2si: _mm_cvt_ss2si); + + macro_rules! test_cvttss_si32 { + ($($test_name:ident : $alias:ident),*) => {$( + #[simd_test(enable = "sse")] + unsafe fn $test_name() { + let inputs = &[ + (42.0f32, 42i32), + (-31.4, -31), + (-33.5, -33), + (-34.5, -34), + (10.999, 10), + (-5.99, -5), + (4.0e10, i32::MIN), + (4.0e-10, 0), + (NAN, i32::MIN), + (2147483500.1, 2147483520), + ]; + for (i, &(xi, e)) in inputs.iter().enumerate() { + let x = _mm_setr_ps(xi, 1.0, 3.0, 4.0); + let r = $alias(x); + assert_eq!(e, r, "TestCase #{} f({:?}) = {}, expected: {}", i, x, r, e); + } } - } - - #[simd_test(enable = "sse")] - fn test_mm_cvtss_si32() { - test_mm_cvtss_si32_impl(_mm_cvtss_si32); - } - - #[simd_test(enable = "sse")] - fn test_mm_cvt_ss2si() { - test_mm_cvtss_si32_impl(_mm_cvt_ss2si); - } - - fn test_cvttss_si32_impl(f: fn(__m128) -> i32) { - let inputs = &[ - (42.0f32, 42i32), - (-31.4, -31), - (-33.5, -33), - (-34.5, -34), - (10.999, 10), - (-5.99, -5), - (4.0e10, i32::MIN), - (4.0e-10, 0), - (NAN, i32::MIN), - (2147483500.1, 2147483520), - ]; - for (i, &(xi, e)) in inputs.iter().enumerate() { - let x = unsafe { _mm_setr_ps(xi, 1.0, 3.0, 4.0) }; - let r = f(x); - assert_eq!(e, r, "TestCase #{} f({:?}) = {}, expected: {}", i, x, r, e); - } - } - - #[simd_test(enable = "sse")] - fn test_mm_cvttss_si32() { - test_cvttss_si32_impl(_mm_cvttss_si32); - } + )*} +} - #[simd_test(enable = "sse")] - fn test_mm_cvtt_ss2si() { - 
test_cvttss_si32_impl(_mm_cvtt_ss2si) - } +test_cvttss_si32!(test_cvttss_si32: _mm_cvttss_si32, test_mm_cvtt_ss2si: _mm_cvtt_ss2si); - fn test_mm_cvtsi32_ss_impl(f: fn(__m128, i32) -> __m128) { - unsafe { +macro_rules! test_mm_cvtsi32_ss { + ($($test_name:ident : $alias:ident),*) => {$( + #[simd_test(enable = "sse")] + unsafe fn $test_name() { let a = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); - let r = f(a, 4555); + let r = $alias(a, 4555); assert_eq_m128(_mm_setr_ps(4555.0, 6.0, 7.0, 8.0), r); - let r = f(a, 322223333); + let r = $alias(a, 322223333); assert_eq_m128(_mm_setr_ps(322223333.0, 6.0, 7.0, 8.0), r); - let r = f(a, -432); + let r = $alias(a, -432); assert_eq_m128(_mm_setr_ps(-432.0, 6.0, 7.0, 8.0), r); - let r = f(a, -322223333); + let r = $alias(a, -322223333); assert_eq_m128(_mm_setr_ps(-322223333.0, 6.0, 7.0, 8.0), r); } - } - - #[simd_test(enable = "sse")] - fn test_mm_cvtsi32_ss() { - test_mm_cvtsi32_ss_impl(_mm_cvtsi32_ss); - } + )*} +} - #[simd_test(enable = "sse")] - fn test_mm_cvt_si2ss() { - test_mm_cvtsi32_ss_impl(_mm_cvt_si2ss); - } +test_mm_cvtsi32_ss!(test_mm_cvtsi32_ss: _mm_cvtsi32_ss, test_mm_cvt_si2ss: _mm_cvt_si2ss); #[simd_test(enable = "sse")] const fn test_mm_cvtss_f32() { @@ -3088,26 +3077,20 @@ mod tests { assert_eq_m128(r, _mm_setr_ps(4.25, 0.0, 0.0, 0.0)); } - fn test_mm_set1_ps_impl(f: fn(f32) -> __m128) { - unsafe { - let r = f(black_box(4.25)); +macro_rules! 
test_mm_set1_ps { + ($($test_name:ident : $alias:ident),*) => {$( + #[simd_test(enable = "sse")] + unsafe fn $test_name() { + let r = $alias(black_box(4.25)); assert_eq!(get_m128(r, 0), 4.25); assert_eq!(get_m128(r, 1), 4.25); assert_eq!(get_m128(r, 2), 4.25); assert_eq!(get_m128(r, 3), 4.25); } - } - - #[simd_test(enable = "sse")] - fn test_mm_set1_ps() { - test_mm_set1_ps_impl(_mm_set1_ps); - test_mm_set1_ps_impl(_mm_set_ps1); - } + )*} +} - #[simd_test(enable = "sse")] - fn test_mm_set_ps1() { - test_mm_set1_ps_impl(_mm_set_ps1); - } +test_mm_set1_ps!(test_mm_set1_ps: _mm_set1_ps, test_mm_set_ps1: _mm_set_ps1); #[simd_test(enable = "sse")] const fn test_mm_set_ps() { @@ -3195,21 +3178,18 @@ mod tests { assert_eq_m128(r, _mm_setr_ps(42.0, 0.0, 0.0, 0.0)); } - fn test_mm_load1_ps_impl(f: unsafe fn(*const f32) -> __m128) { - let a = 42.0f32; - let r = unsafe { f(ptr::addr_of!(a)) }; - unsafe { assert_eq_m128(r, _mm_setr_ps(42.0, 42.0, 42.0, 42.0)) }; - } - - #[simd_test(enable = "sse")] - fn test_mm_load1_ps() { - test_mm_load1_ps_impl(_mm_load1_ps); - } + macro_rules! test_mm_load1_ps { + ($($test_name:ident : $alias:ident),*) => {$( + #[simd_test(enable = "sse")] + unsafe fn $test_name() { + let a = 42.0f32; + let r = $alias(ptr::addr_of!(a)); + assert_eq_m128(r, _mm_setr_ps(42.0, 42.0, 42.0, 42.0)); + } + )*} +} - #[simd_test(enable = "sse")] - fn test_mm_load_ps1() { - test_mm_load1_ps_impl(_mm_load_ps1); - } +test_mm_load1_ps!(test_mm_load1_ps: _mm_load1_ps, test_mm_load_ps1: _mm_load_ps1); #[simd_test(enable = "sse")] const fn test_mm_load_ps() { @@ -3260,20 +3240,20 @@ mod tests { assert_eq!(vals[2], 0.0); } - fn test_mm_store1_ps_impl(f: unsafe fn(*mut f32, __m128)) { - let mut vals = Memory { data: [0.0f32; 4] }; - let a = unsafe { _mm_setr_ps(1.0, 2.0, 3.0, 4.0) }; - let p = vals.data.as_mut_ptr(); - unsafe { - f(p, *black_box(&a)); +macro_rules! 
test_mm_store1_ps { + ($($test_name:ident : $alias:ident),*) => {$( + #[simd_test(enable = "sse")] + unsafe fn $test_name() { + let mut vals = Memory { data: [0.0f32; 4] }; + let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + let p = vals.data.as_mut_ptr(); + $alias(p, *black_box(&a)); + assert_eq!(vals.data, [1.0, 1.0, 1.0, 1.0]); } - assert_eq!(vals.data, [1.0, 1.0, 1.0, 1.0]); - } + )*} +} - #[simd_test(enable = "sse")] - fn test_mm_store1_ps() { - test_mm_store1_ps_impl(_mm_store1_ps); - } +test_mm_store1_ps!(test_mm_store1_ps: _mm_store1_ps, test_mm_store_ps1: _mm_store_ps1); #[simd_test(enable = "sse")] const fn test_mm_store_ps() { @@ -3290,10 +3270,6 @@ mod tests { assert_eq!(vals.data, [1.0, 2.0, 3.0, 4.0]); } - #[simd_test(enable = "sse")] - fn test_mm_store_ps1() { - test_mm_store1_ps_impl(_mm_store_ps1); - } #[simd_test(enable = "sse")] const fn test_mm_storer_ps() { From 0eb04eb555ea1d15ad83991f129062ac9ff2611c Mon Sep 17 00:00:00 2001 From: ArunTamil21 Date: Thu, 12 Mar 2026 13:16:36 +0000 Subject: [PATCH 06/64] Fix formatting --- .../stdarch/crates/core_arch/src/x86/sse.rs | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/x86/sse.rs b/library/stdarch/crates/core_arch/src/x86/sse.rs index 93b2816869620..e12087da23d96 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse.rs @@ -3040,9 +3040,9 @@ mod tests { )*} } -test_cvttss_si32!(test_cvttss_si32: _mm_cvttss_si32, test_mm_cvtt_ss2si: _mm_cvtt_ss2si); + test_cvttss_si32!(test_cvttss_si32: _mm_cvttss_si32, test_mm_cvtt_ss2si: _mm_cvtt_ss2si); -macro_rules! test_mm_cvtsi32_ss { + macro_rules! test_mm_cvtsi32_ss { ($($test_name:ident : $alias:ident),*) => {$( #[simd_test(enable = "sse")] unsafe fn $test_name() { @@ -3063,7 +3063,7 @@ macro_rules! 
test_mm_cvtsi32_ss { )*} } -test_mm_cvtsi32_ss!(test_mm_cvtsi32_ss: _mm_cvtsi32_ss, test_mm_cvt_si2ss: _mm_cvt_si2ss); + test_mm_cvtsi32_ss!(test_mm_cvtsi32_ss: _mm_cvtsi32_ss, test_mm_cvt_si2ss: _mm_cvt_si2ss); #[simd_test(enable = "sse")] const fn test_mm_cvtss_f32() { @@ -3077,7 +3077,7 @@ test_mm_cvtsi32_ss!(test_mm_cvtsi32_ss: _mm_cvtsi32_ss, test_mm_cvt_si2ss: _mm_c assert_eq_m128(r, _mm_setr_ps(4.25, 0.0, 0.0, 0.0)); } -macro_rules! test_mm_set1_ps { + macro_rules! test_mm_set1_ps { ($($test_name:ident : $alias:ident),*) => {$( #[simd_test(enable = "sse")] unsafe fn $test_name() { @@ -3090,7 +3090,7 @@ macro_rules! test_mm_set1_ps { )*} } -test_mm_set1_ps!(test_mm_set1_ps: _mm_set1_ps, test_mm_set_ps1: _mm_set_ps1); + test_mm_set1_ps!(test_mm_set1_ps: _mm_set1_ps, test_mm_set_ps1: _mm_set_ps1); #[simd_test(enable = "sse")] const fn test_mm_set_ps() { @@ -3178,7 +3178,7 @@ test_mm_set1_ps!(test_mm_set1_ps: _mm_set1_ps, test_mm_set_ps1: _mm_set_ps1); assert_eq_m128(r, _mm_setr_ps(42.0, 0.0, 0.0, 0.0)); } - macro_rules! test_mm_load1_ps { + macro_rules! test_mm_load1_ps { ($($test_name:ident : $alias:ident),*) => {$( #[simd_test(enable = "sse")] unsafe fn $test_name() { @@ -3189,7 +3189,7 @@ test_mm_set1_ps!(test_mm_set1_ps: _mm_set1_ps, test_mm_set_ps1: _mm_set_ps1); )*} } -test_mm_load1_ps!(test_mm_load1_ps: _mm_load1_ps, test_mm_load_ps1: _mm_load_ps1); + test_mm_load1_ps!(test_mm_load1_ps: _mm_load1_ps, test_mm_load_ps1: _mm_load_ps1); #[simd_test(enable = "sse")] const fn test_mm_load_ps() { @@ -3240,7 +3240,7 @@ test_mm_load1_ps!(test_mm_load1_ps: _mm_load1_ps, test_mm_load_ps1: _mm_load_ps1 assert_eq!(vals[2], 0.0); } -macro_rules! test_mm_store1_ps { + macro_rules! test_mm_store1_ps { ($($test_name:ident : $alias:ident),*) => {$( #[simd_test(enable = "sse")] unsafe fn $test_name() { @@ -3253,7 +3253,7 @@ macro_rules! 
test_mm_store1_ps { )*} } -test_mm_store1_ps!(test_mm_store1_ps: _mm_store1_ps, test_mm_store_ps1: _mm_store_ps1); + test_mm_store1_ps!(test_mm_store1_ps: _mm_store1_ps, test_mm_store_ps1: _mm_store_ps1); #[simd_test(enable = "sse")] const fn test_mm_store_ps() { @@ -3270,7 +3270,6 @@ test_mm_store1_ps!(test_mm_store1_ps: _mm_store1_ps, test_mm_store_ps1: _mm_stor assert_eq!(vals.data, [1.0, 2.0, 3.0, 4.0]); } - #[simd_test(enable = "sse")] const fn test_mm_storer_ps() { let mut vals = Memory { data: [0.0f32; 4] }; From 68a6acef71af4ae7130a8da46b9777afe0f9f242 Mon Sep 17 00:00:00 2001 From: ArunTamil21 Date: Tue, 17 Mar 2026 22:16:33 +0000 Subject: [PATCH 07/64] Use macros for test body deduplication in SSE alias tests --- .../stdarch/crates/core_arch/src/x86/sse.rs | 210 ++++++++++-------- 1 file changed, 120 insertions(+), 90 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/x86/sse.rs b/library/stdarch/crates/core_arch/src/x86/sse.rs index e12087da23d96..fbce52fc29ead 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse.rs @@ -2997,73 +2997,88 @@ mod tests { } } - macro_rules! test_mm_cvtss_si32 { - ($($test_name:ident : $alias:ident),*) => {$( - #[simd_test(enable = "sse")] - unsafe fn $test_name() { - let inputs = &[42.0f32, -3.1, 4.0e10, 4.0e-20, NAN, 2147483500.1]; - let result = &[42i32, -3, i32::MIN, 0, i32::MIN, 2147483520]; - for i in 0..inputs.len() { - let x = _mm_setr_ps(inputs[i], 1.0, 3.0, 4.0); - let e = result[i]; - let r = $alias(x); - assert_eq!(e, r, "TestCase #{} f({:?}) = {}, expected: {}", i, x, r, e); - } + macro_rules! 
test_mm_cvtss_si32_impl { + ($alias:ident) => { + let inputs = &[42.0f32, -3.1, 4.0e10, 4.0e-20, NAN, 2147483500.1]; + let result = &[42i32, -3, i32::MIN, 0, i32::MIN, 2147483520]; + for i in 0..inputs.len() { + let x = _mm_setr_ps(inputs[i], 1.0, 3.0, 4.0); + let e = result[i]; + let r = $alias(x); + assert_eq!(e, r, "TestCase #{} f({:?}) = {}, expected: {}", i, x, r, e); } - )*} - } - - test_mm_cvtss_si32!(test_mm_cvtss_si32: _mm_cvtss_si32, test_mm_cvt_ss2si: _mm_cvt_ss2si); - - macro_rules! test_cvttss_si32 { - ($($test_name:ident : $alias:ident),*) => {$( - #[simd_test(enable = "sse")] - unsafe fn $test_name() { - let inputs = &[ - (42.0f32, 42i32), - (-31.4, -31), - (-33.5, -33), - (-34.5, -34), - (10.999, 10), - (-5.99, -5), - (4.0e10, i32::MIN), - (4.0e-10, 0), - (NAN, i32::MIN), - (2147483500.1, 2147483520), - ]; - for (i, &(xi, e)) in inputs.iter().enumerate() { - let x = _mm_setr_ps(xi, 1.0, 3.0, 4.0); - let r = $alias(x); - assert_eq!(e, r, "TestCase #{} f({:?}) = {}, expected: {}", i, x, r, e); + } + } + + #[simd_test(enable = "sse")] + unsafe fn test_mm_cvtss_si32() { + test_mm_cvtss_si32_impl!(_mm_cvtss_si32); + } + + #[simd_test(enable = "sse")] + unsafe fn test_mm_cvt_ss2si() { + test_mm_cvtss_si32_impl!(_mm_cvt_ss2si); + } + + macro_rules! 
test_cvttss_si32_impl { + ($alias:ident) => { + let inputs = &[ + (42.0f32, 42i32), + (-31.4, -31), + (-33.5, -33), + (-34.5, -34), + (10.999, 10), + (-5.99, -5), + (4.0e10, i32::MIN), + (4.0e-10, 0), + (NAN, i32::MIN), + (2147483500.1, 2147483520), + ]; + for (i, &(xi, e)) in inputs.iter().enumerate() { + let x = _mm_setr_ps(xi, 1.0, 3.0, 4.0); + let r = $alias(x); + assert_eq!(e, r, "TestCase #{} f({:?}) = {}, expected: {}", i, x, r, e); } } - )*} -} + } + + #[simd_test(enable = "sse")] + unsafe fn test_mm_cvttss_si32() { + test_cvttss_si32_impl!(_mm_cvttss_si32); + } - test_cvttss_si32!(test_cvttss_si32: _mm_cvttss_si32, test_mm_cvtt_ss2si: _mm_cvtt_ss2si); + #[simd_test(enable = "sse")] + unsafe fn test_mm_cvtt_ss2si() { + test_cvttss_si32_impl!(_mm_cvtt_ss2si); + } - macro_rules! test_mm_cvtsi32_ss { - ($($test_name:ident : $alias:ident),*) => {$( - #[simd_test(enable = "sse")] - unsafe fn $test_name() { - let a = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); + macro_rules! test_mm_cvtsi32_ss_impl { + ($alias:ident) => { + let a = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); - let r = $alias(a, 4555); - assert_eq_m128(_mm_setr_ps(4555.0, 6.0, 7.0, 8.0), r); + let r = $alias(a, 4555); + assert_eq_m128(_mm_setr_ps(4555.0, 6.0, 7.0, 8.0), r); - let r = $alias(a, 322223333); - assert_eq_m128(_mm_setr_ps(322223333.0, 6.0, 7.0, 8.0), r); + let r = $alias(a, 322223333); + assert_eq_m128(_mm_setr_ps(322223333.0, 6.0, 7.0, 8.0), r); - let r = $alias(a, -432); - assert_eq_m128(_mm_setr_ps(-432.0, 6.0, 7.0, 8.0), r); + let r = $alias(a, -432); + assert_eq_m128(_mm_setr_ps(-432.0, 6.0, 7.0, 8.0), r); - let r = $alias(a, -322223333); - assert_eq_m128(_mm_setr_ps(-322223333.0, 6.0, 7.0, 8.0), r); + let r = $alias(a, -322223333); + assert_eq_m128(_mm_setr_ps(-322223333.0, 6.0, 7.0, 8.0), r); } - )*} -} + } - test_mm_cvtsi32_ss!(test_mm_cvtsi32_ss: _mm_cvtsi32_ss, test_mm_cvt_si2ss: _mm_cvt_si2ss); + #[simd_test(enable = "sse")] + unsafe fn test_mm_cvtsi32_ss() { + 
test_mm_cvtsi32_ss_impl!(_mm_cvtsi32_ss); + } + + #[simd_test(enable = "sse")] + unsafe fn test_mm_cvt_si2ss() { + test_mm_cvtsi32_ss_impl!(_mm_cvt_si2ss); + } #[simd_test(enable = "sse")] const fn test_mm_cvtss_f32() { @@ -3077,20 +3092,25 @@ mod tests { assert_eq_m128(r, _mm_setr_ps(4.25, 0.0, 0.0, 0.0)); } - macro_rules! test_mm_set1_ps { - ($($test_name:ident : $alias:ident),*) => {$( - #[simd_test(enable = "sse")] - unsafe fn $test_name() { - let r = $alias(black_box(4.25)); - assert_eq!(get_m128(r, 0), 4.25); - assert_eq!(get_m128(r, 1), 4.25); - assert_eq!(get_m128(r, 2), 4.25); - assert_eq!(get_m128(r, 3), 4.25); + macro_rules! test_mm_set1_ps_impl { + ($alias:ident) => { + let r = $alias(black_box(4.25)); + assert_eq!(get_m128(r, 0), 4.25); + assert_eq!(get_m128(r, 1), 4.25); + assert_eq!(get_m128(r, 2), 4.25); + assert_eq!(get_m128(r, 3), 4.25); } - )*} -} + } + + #[simd_test(enable = "sse")] + unsafe fn test_mm_set1_ps() { + test_mm_set1_ps_impl!(_mm_set1_ps); + } - test_mm_set1_ps!(test_mm_set1_ps: _mm_set1_ps, test_mm_set_ps1: _mm_set_ps1); + #[simd_test(enable = "sse")] + unsafe fn test_mm_set_ps1() { + test_mm_set1_ps_impl!(_mm_set_ps1); + } #[simd_test(enable = "sse")] const fn test_mm_set_ps() { @@ -3178,18 +3198,23 @@ mod tests { assert_eq_m128(r, _mm_setr_ps(42.0, 0.0, 0.0, 0.0)); } - macro_rules! test_mm_load1_ps { - ($($test_name:ident : $alias:ident),*) => {$( - #[simd_test(enable = "sse")] - unsafe fn $test_name() { - let a = 42.0f32; - let r = $alias(ptr::addr_of!(a)); - assert_eq_m128(r, _mm_setr_ps(42.0, 42.0, 42.0, 42.0)); + macro_rules! 
test_mm_load1_ps_impl { + ($alias:ident) => { + let a = 42.0f32; + let r = $alias(ptr::addr_of!(a)); + assert_eq_m128(r, _mm_setr_ps(42.0, 42.0, 42.0, 42.0)); } - )*} -} + } + + #[simd_test(enable = "sse")] + unsafe fn test_mm_load1_ps() { + test_mm_load1_ps_impl!(_mm_load1_ps); + } - test_mm_load1_ps!(test_mm_load1_ps: _mm_load1_ps, test_mm_load_ps1: _mm_load_ps1); + #[simd_test(enable = "sse")] + unsafe fn test_mm_load_ps1() { + test_mm_load1_ps_impl!(_mm_load_ps1); + } #[simd_test(enable = "sse")] const fn test_mm_load_ps() { @@ -3240,20 +3265,25 @@ mod tests { assert_eq!(vals[2], 0.0); } - macro_rules! test_mm_store1_ps { - ($($test_name:ident : $alias:ident),*) => {$( - #[simd_test(enable = "sse")] - unsafe fn $test_name() { - let mut vals = Memory { data: [0.0f32; 4] }; - let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); - let p = vals.data.as_mut_ptr(); - $alias(p, *black_box(&a)); - assert_eq!(vals.data, [1.0, 1.0, 1.0, 1.0]); + macro_rules! test_mm_store1_ps_impl { + ($alias:ident) => { + let mut vals = Memory { data: [0.0f32; 4] }; + let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + let p = vals.data.as_mut_ptr(); + $alias(p, *black_box(&a)); + assert_eq!(vals.data, [1.0, 1.0, 1.0, 1.0]); } - )*} -} + } + + #[simd_test(enable = "sse")] + unsafe fn test_mm_store1_ps() { + test_mm_store1_ps_impl!(_mm_store1_ps); + } - test_mm_store1_ps!(test_mm_store1_ps: _mm_store1_ps, test_mm_store_ps1: _mm_store_ps1); + #[simd_test(enable = "sse")] + unsafe fn test_mm_store_ps1() { + test_mm_store1_ps_impl!(_mm_store_ps1); + } #[simd_test(enable = "sse")] const fn test_mm_store_ps() { From 7da5fbcf42216bde5aa7350046acc9dfcba2a701 Mon Sep 17 00:00:00 2001 From: ArunTamil21 Date: Tue, 17 Mar 2026 22:18:40 +0000 Subject: [PATCH 08/64] Fix formatting --- .../stdarch/crates/core_arch/src/x86/sse.rs | 114 +++++++++--------- 1 file changed, 57 insertions(+), 57 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/x86/sse.rs b/library/stdarch/crates/core_arch/src/x86/sse.rs index 
fbce52fc29ead..11fb3a865b30d 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse.rs @@ -2998,16 +2998,16 @@ mod tests { } macro_rules! test_mm_cvtss_si32_impl { - ($alias:ident) => { - let inputs = &[42.0f32, -3.1, 4.0e10, 4.0e-20, NAN, 2147483500.1]; - let result = &[42i32, -3, i32::MIN, 0, i32::MIN, 2147483520]; - for i in 0..inputs.len() { - let x = _mm_setr_ps(inputs[i], 1.0, 3.0, 4.0); - let e = result[i]; - let r = $alias(x); - assert_eq!(e, r, "TestCase #{} f({:?}) = {}, expected: {}", i, x, r, e); + ($alias:ident) => { + let inputs = &[42.0f32, -3.1, 4.0e10, 4.0e-20, NAN, 2147483500.1]; + let result = &[42i32, -3, i32::MIN, 0, i32::MIN, 2147483520]; + for i in 0..inputs.len() { + let x = _mm_setr_ps(inputs[i], 1.0, 3.0, 4.0); + let e = result[i]; + let r = $alias(x); + assert_eq!(e, r, "TestCase #{} f({:?}) = {}, expected: {}", i, x, r, e); } - } + }; } #[simd_test(enable = "sse")] @@ -3021,25 +3021,25 @@ mod tests { } macro_rules! test_cvttss_si32_impl { - ($alias:ident) => { - let inputs = &[ - (42.0f32, 42i32), - (-31.4, -31), - (-33.5, -33), - (-34.5, -34), - (10.999, 10), - (-5.99, -5), - (4.0e10, i32::MIN), - (4.0e-10, 0), - (NAN, i32::MIN), - (2147483500.1, 2147483520), - ]; - for (i, &(xi, e)) in inputs.iter().enumerate() { - let x = _mm_setr_ps(xi, 1.0, 3.0, 4.0); - let r = $alias(x); - assert_eq!(e, r, "TestCase #{} f({:?}) = {}, expected: {}", i, x, r, e); + ($alias:ident) => { + let inputs = &[ + (42.0f32, 42i32), + (-31.4, -31), + (-33.5, -33), + (-34.5, -34), + (10.999, 10), + (-5.99, -5), + (4.0e10, i32::MIN), + (4.0e-10, 0), + (NAN, i32::MIN), + (2147483500.1, 2147483520), + ]; + for (i, &(xi, e)) in inputs.iter().enumerate() { + let x = _mm_setr_ps(xi, 1.0, 3.0, 4.0); + let r = $alias(x); + assert_eq!(e, r, "TestCase #{} f({:?}) = {}, expected: {}", i, x, r, e); } - } + }; } #[simd_test(enable = "sse")] @@ -3053,21 +3053,21 @@ mod tests { } macro_rules! 
test_mm_cvtsi32_ss_impl { - ($alias:ident) => { - let a = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); + ($alias:ident) => { + let a = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); - let r = $alias(a, 4555); - assert_eq_m128(_mm_setr_ps(4555.0, 6.0, 7.0, 8.0), r); + let r = $alias(a, 4555); + assert_eq_m128(_mm_setr_ps(4555.0, 6.0, 7.0, 8.0), r); - let r = $alias(a, 322223333); - assert_eq_m128(_mm_setr_ps(322223333.0, 6.0, 7.0, 8.0), r); + let r = $alias(a, 322223333); + assert_eq_m128(_mm_setr_ps(322223333.0, 6.0, 7.0, 8.0), r); - let r = $alias(a, -432); - assert_eq_m128(_mm_setr_ps(-432.0, 6.0, 7.0, 8.0), r); + let r = $alias(a, -432); + assert_eq_m128(_mm_setr_ps(-432.0, 6.0, 7.0, 8.0), r); - let r = $alias(a, -322223333); - assert_eq_m128(_mm_setr_ps(-322223333.0, 6.0, 7.0, 8.0), r); - } + let r = $alias(a, -322223333); + assert_eq_m128(_mm_setr_ps(-322223333.0, 6.0, 7.0, 8.0), r); + }; } #[simd_test(enable = "sse")] @@ -3093,13 +3093,13 @@ mod tests { } macro_rules! test_mm_set1_ps_impl { - ($alias:ident) => { - let r = $alias(black_box(4.25)); - assert_eq!(get_m128(r, 0), 4.25); - assert_eq!(get_m128(r, 1), 4.25); - assert_eq!(get_m128(r, 2), 4.25); - assert_eq!(get_m128(r, 3), 4.25); - } + ($alias:ident) => { + let r = $alias(black_box(4.25)); + assert_eq!(get_m128(r, 0), 4.25); + assert_eq!(get_m128(r, 1), 4.25); + assert_eq!(get_m128(r, 2), 4.25); + assert_eq!(get_m128(r, 3), 4.25); + }; } #[simd_test(enable = "sse")] @@ -3199,11 +3199,11 @@ mod tests { } macro_rules! test_mm_load1_ps_impl { - ($alias:ident) => { - let a = 42.0f32; - let r = $alias(ptr::addr_of!(a)); - assert_eq_m128(r, _mm_setr_ps(42.0, 42.0, 42.0, 42.0)); - } + ($alias:ident) => { + let a = 42.0f32; + let r = $alias(ptr::addr_of!(a)); + assert_eq_m128(r, _mm_setr_ps(42.0, 42.0, 42.0, 42.0)); + }; } #[simd_test(enable = "sse")] @@ -3266,13 +3266,13 @@ mod tests { } macro_rules! 
test_mm_store1_ps_impl { - ($alias:ident) => { - let mut vals = Memory { data: [0.0f32; 4] }; - let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); - let p = vals.data.as_mut_ptr(); - $alias(p, *black_box(&a)); - assert_eq!(vals.data, [1.0, 1.0, 1.0, 1.0]); - } + ($alias:ident) => { + let mut vals = Memory { data: [0.0f32; 4] }; + let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + let p = vals.data.as_mut_ptr(); + $alias(p, *black_box(&a)); + assert_eq!(vals.data, [1.0, 1.0, 1.0, 1.0]); + }; } #[simd_test(enable = "sse")] From 9adc2b00fc498f993b27140b3675c80388daa881 Mon Sep 17 00:00:00 2001 From: WANG Rui Date: Thu, 19 Mar 2026 20:39:38 +0800 Subject: [PATCH 09/64] Mark the LoongArch intrinsics as inline(always) --- .../crates/core_arch/src/loongarch32/mod.rs | 8 +- .../src/loongarch64/lasx/generated.rs | 1516 ++++++++--------- .../src/loongarch64/lsx/generated.rs | 1440 ++++++++-------- .../crates/core_arch/src/loongarch64/mod.rs | 26 +- .../core_arch/src/loongarch_shared/mod.rs | 50 +- .../crates/stdarch-gen-loongarch/src/main.rs | 4 +- 6 files changed, 1522 insertions(+), 1522 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/loongarch32/mod.rs b/library/stdarch/crates/core_arch/src/loongarch32/mod.rs index 4e3f3d27182e4..6cc1116113e1d 100644 --- a/library/stdarch/crates/core_arch/src/loongarch32/mod.rs +++ b/library/stdarch/crates/core_arch/src/loongarch32/mod.rs @@ -15,7 +15,7 @@ unsafe extern "unadjusted" { } /// Generates the cache operation instruction -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn cacop(b: i32) { static_assert_uimm_bits!(IMM5, 5); @@ -24,7 +24,7 @@ pub unsafe fn cacop(b: i32) { } /// Reads the CSR -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn csrrd() -> i32 { static_assert_uimm_bits!(IMM14, 14); @@ -32,7 +32,7 @@ pub unsafe fn csrrd() -> i32 { } /// Writes the CSR -#[inline] +#[inline(always)] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub unsafe fn csrwr(a: i32) -> i32 { static_assert_uimm_bits!(IMM14, 14); @@ -40,7 +40,7 @@ pub unsafe fn csrwr(a: i32) -> i32 { } /// Exchanges the CSR -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn csrxchg(a: i32, b: i32) -> i32 { static_assert_uimm_bits!(IMM14, 14); diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs index 1d9d4e8248e63..d2e1a87fde46f 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs @@ -1529,35 +1529,35 @@ unsafe extern "unadjusted" { fn __lasx_insert_128_hi(a: __v4i64, b: __v2i64) -> __v4i64; } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsll_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsll_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsll_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsll_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsll_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsll_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsll_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsll_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1566,7 +1566,7 @@ pub fn 
lasx_xvslli_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslli_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1575,7 +1575,7 @@ pub fn lasx_xvslli_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslli_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1584,7 +1584,7 @@ pub fn lasx_xvslli_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslli_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1593,35 +1593,35 @@ pub fn lasx_xvslli_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslli_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsra_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsra_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsra_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsra_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsra_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsra_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsra_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsra_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] 
#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1630,7 +1630,7 @@ pub fn lasx_xvsrai_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrai_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1639,7 +1639,7 @@ pub fn lasx_xvsrai_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrai_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1648,7 +1648,7 @@ pub fn lasx_xvsrai_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrai_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1657,35 +1657,35 @@ pub fn lasx_xvsrai_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrai_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrar_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrar_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrar_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrar_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrar_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrar_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrar_d(a: m256i, b: m256i) -> m256i { unsafe { 
transmute(__lasx_xvsrar_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1694,7 +1694,7 @@ pub fn lasx_xvsrari_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrari_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1703,7 +1703,7 @@ pub fn lasx_xvsrari_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrari_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1712,7 +1712,7 @@ pub fn lasx_xvsrari_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrari_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1721,35 +1721,35 @@ pub fn lasx_xvsrari_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrari_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrl_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrl_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrl_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrl_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrl_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrl_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = 
"lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrl_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrl_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1758,7 +1758,7 @@ pub fn lasx_xvsrli_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrli_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1767,7 +1767,7 @@ pub fn lasx_xvsrli_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrli_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1776,7 +1776,7 @@ pub fn lasx_xvsrli_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrli_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1785,35 +1785,35 @@ pub fn lasx_xvsrli_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrli_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrlr_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlr_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrlr_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlr_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrlr_w(a: m256i, b: m256i) -> m256i { 
unsafe { transmute(__lasx_xvsrlr_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrlr_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlr_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1822,7 +1822,7 @@ pub fn lasx_xvsrlri_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlri_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1831,7 +1831,7 @@ pub fn lasx_xvsrlri_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlri_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1840,7 +1840,7 @@ pub fn lasx_xvsrlri_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlri_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1849,35 +1849,35 @@ pub fn lasx_xvsrlri_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlri_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitclr_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitclr_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitclr_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitclr_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] 
#[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitclr_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitclr_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitclr_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitclr_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1886,7 +1886,7 @@ pub fn lasx_xvbitclri_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitclri_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1895,7 +1895,7 @@ pub fn lasx_xvbitclri_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitclri_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1904,7 +1904,7 @@ pub fn lasx_xvbitclri_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitclri_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1913,35 +1913,35 @@ pub fn lasx_xvbitclri_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitclri_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitset_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitset_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] pub fn lasx_xvbitset_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitset_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitset_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitset_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitset_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitset_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1950,7 +1950,7 @@ pub fn lasx_xvbitseti_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitseti_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1959,7 +1959,7 @@ pub fn lasx_xvbitseti_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitseti_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1968,7 +1968,7 @@ pub fn lasx_xvbitseti_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitseti_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1977,35 +1977,35 @@ pub fn lasx_xvbitseti_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitseti_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitrev_b(a: m256i, b: m256i) -> m256i { unsafe { 
transmute(__lasx_xvbitrev_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitrev_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitrev_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitrev_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitrev_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitrev_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitrev_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2014,7 +2014,7 @@ pub fn lasx_xvbitrevi_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitrevi_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2023,7 +2023,7 @@ pub fn lasx_xvbitrevi_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitrevi_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2032,7 +2032,7 @@ pub fn lasx_xvbitrevi_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitrevi_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2041,35 +2041,35 @@ pub fn lasx_xvbitrevi_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitrevi_d(transmute(a), IMM6)) } } -#[inline] 
+#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvadd_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvadd_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvadd_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvadd_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvadd_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvadd_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvadd_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvadd_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2078,7 +2078,7 @@ pub fn lasx_xvaddi_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvaddi_bu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2087,7 +2087,7 @@ pub fn lasx_xvaddi_hu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvaddi_hu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2096,7 +2096,7 @@ pub fn lasx_xvaddi_wu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvaddi_wu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2105,35 
+2105,35 @@ pub fn lasx_xvaddi_du(a: m256i) -> m256i { unsafe { transmute(__lasx_xvaddi_du(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsub_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsub_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsub_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsub_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsub_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsub_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsub_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsub_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2142,7 +2142,7 @@ pub fn lasx_xvsubi_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsubi_bu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2151,7 +2151,7 @@ pub fn lasx_xvsubi_hu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsubi_hu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2160,7 +2160,7 @@ pub fn lasx_xvsubi_wu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsubi_wu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] 
#[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2169,35 +2169,35 @@ pub fn lasx_xvsubi_du(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsubi_du(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmax_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmax_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmax_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmax_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmax_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmax_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmax_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmax_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2206,7 +2206,7 @@ pub fn lasx_xvmaxi_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmaxi_b(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2215,7 +2215,7 @@ pub fn lasx_xvmaxi_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmaxi_h(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2224,7 +2224,7 @@ pub fn 
lasx_xvmaxi_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmaxi_w(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2233,35 +2233,35 @@ pub fn lasx_xvmaxi_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmaxi_d(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmax_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmax_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmax_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmax_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmax_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmax_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmax_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmax_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2270,7 +2270,7 @@ pub fn lasx_xvmaxi_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmaxi_bu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2279,7 +2279,7 @@ pub fn lasx_xvmaxi_hu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmaxi_hu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = 
"lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2288,7 +2288,7 @@ pub fn lasx_xvmaxi_wu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmaxi_wu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2297,35 +2297,35 @@ pub fn lasx_xvmaxi_du(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmaxi_du(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmin_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmin_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmin_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmin_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmin_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmin_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmin_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmin_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2334,7 +2334,7 @@ pub fn lasx_xvmini_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmini_b(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2343,7 +2343,7 @@ pub fn lasx_xvmini_h(a: m256i) -> 
m256i { unsafe { transmute(__lasx_xvmini_h(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2352,7 +2352,7 @@ pub fn lasx_xvmini_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmini_w(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2361,35 +2361,35 @@ pub fn lasx_xvmini_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmini_d(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmin_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmin_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmin_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmin_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmin_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmin_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmin_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmin_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2398,7 +2398,7 @@ pub fn lasx_xvmini_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmini_bu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] 
#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2407,7 +2407,7 @@ pub fn lasx_xvmini_hu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmini_hu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2416,7 +2416,7 @@ pub fn lasx_xvmini_wu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmini_wu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2425,35 +2425,35 @@ pub fn lasx_xvmini_du(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmini_du(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvseq_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvseq_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvseq_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvseq_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvseq_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvseq_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvseq_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvseq_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2462,7 +2462,7 @@ pub fn lasx_xvseqi_b(a: m256i) -> m256i { 
unsafe { transmute(__lasx_xvseqi_b(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2471,7 +2471,7 @@ pub fn lasx_xvseqi_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvseqi_h(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2480,7 +2480,7 @@ pub fn lasx_xvseqi_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvseqi_w(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2489,35 +2489,35 @@ pub fn lasx_xvseqi_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvseqi_d(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvslt_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvslt_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvslt_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvslt_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvslt_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvslt_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvslt_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvslt_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2526,7 +2526,7 @@ pub fn lasx_xvslti_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslti_b(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2535,7 +2535,7 @@ pub fn lasx_xvslti_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslti_h(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2544,7 +2544,7 @@ pub fn lasx_xvslti_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslti_w(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2553,35 +2553,35 @@ pub fn lasx_xvslti_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslti_d(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvslt_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvslt_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvslt_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvslt_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvslt_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvslt_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvslt_du(a: m256i, b: m256i) -> m256i { unsafe { 
transmute(__lasx_xvslt_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2590,7 +2590,7 @@ pub fn lasx_xvslti_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslti_bu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2599,7 +2599,7 @@ pub fn lasx_xvslti_hu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslti_hu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2608,7 +2608,7 @@ pub fn lasx_xvslti_wu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslti_wu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2617,35 +2617,35 @@ pub fn lasx_xvslti_du(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslti_du(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsle_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsle_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsle_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsle_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsle_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsle_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = 
"lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsle_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsle_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2654,7 +2654,7 @@ pub fn lasx_xvslei_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslei_b(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2663,7 +2663,7 @@ pub fn lasx_xvslei_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslei_h(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2672,7 +2672,7 @@ pub fn lasx_xvslei_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslei_w(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2681,35 +2681,35 @@ pub fn lasx_xvslei_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslei_d(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsle_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsle_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsle_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsle_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsle_wu(a: m256i, b: m256i) -> 
m256i { unsafe { transmute(__lasx_xvsle_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsle_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsle_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2718,7 +2718,7 @@ pub fn lasx_xvslei_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslei_bu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2727,7 +2727,7 @@ pub fn lasx_xvslei_hu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslei_hu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2736,7 +2736,7 @@ pub fn lasx_xvslei_wu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslei_wu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2745,7 +2745,7 @@ pub fn lasx_xvslei_du(a: m256i) -> m256i { unsafe { transmute(__lasx_xvslei_du(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2754,7 +2754,7 @@ pub fn lasx_xvsat_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsat_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2763,7 +2763,7 @@ pub fn lasx_xvsat_h(a: m256i) -> m256i { unsafe { 
transmute(__lasx_xvsat_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2772,7 +2772,7 @@ pub fn lasx_xvsat_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsat_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2781,7 +2781,7 @@ pub fn lasx_xvsat_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsat_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2790,7 +2790,7 @@ pub fn lasx_xvsat_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsat_bu(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2799,7 +2799,7 @@ pub fn lasx_xvsat_hu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsat_hu(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2808,7 +2808,7 @@ pub fn lasx_xvsat_wu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsat_wu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2817,595 +2817,595 @@ pub fn lasx_xvsat_du(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsat_du(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvadda_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvadda_b(transmute(a), transmute(b))) } } 
-#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvadda_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvadda_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvadda_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvadda_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvadda_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvadda_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsadd_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsadd_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsadd_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsadd_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsadd_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsadd_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsadd_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsadd_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsadd_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsadd_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] 
#[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsadd_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsadd_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsadd_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsadd_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsadd_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsadd_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavg_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavg_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavg_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavg_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavg_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavg_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavg_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavg_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavg_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavg_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = 
"lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavg_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavg_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavg_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavg_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavg_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavg_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavgr_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavgr_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavgr_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavgr_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavgr_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavgr_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavgr_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavgr_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavgr_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavgr_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lasx_xvavgr_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavgr_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavgr_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavgr_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavgr_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavgr_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssub_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssub_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssub_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssub_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssub_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssub_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssub_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssub_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssub_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssub_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", 
issue = "117427")] pub fn lasx_xvssub_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssub_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssub_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssub_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssub_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssub_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvabsd_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvabsd_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvabsd_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvabsd_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvabsd_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvabsd_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvabsd_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvabsd_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvabsd_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvabsd_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub 
fn lasx_xvabsd_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvabsd_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvabsd_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvabsd_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvabsd_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvabsd_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmul_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmul_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmul_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmul_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmul_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmul_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmul_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmul_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmadd_b(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmadd_b(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lasx_xvmadd_h(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmadd_h(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmadd_w(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmadd_w(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmadd_d(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmadd_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmsub_b(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmsub_b(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmsub_h(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmsub_h(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmsub_w(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmsub_w(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmsub_d(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmsub_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvdiv_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvdiv_b(transmute(a), transmute(b))) } 
} -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvdiv_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvdiv_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvdiv_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvdiv_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvdiv_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvdiv_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvdiv_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvdiv_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvdiv_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvdiv_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvdiv_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvdiv_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvdiv_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvdiv_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhaddw_h_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhaddw_h_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] 
#[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhaddw_w_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhaddw_w_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhaddw_d_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhaddw_d_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhaddw_hu_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhaddw_hu_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhaddw_wu_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhaddw_wu_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhaddw_du_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhaddw_du_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhsubw_h_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhsubw_h_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhsubw_w_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhsubw_w_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhsubw_d_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhsubw_d_w(transmute(a), transmute(b))) } } 
-#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhsubw_hu_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhsubw_hu_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhsubw_wu_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhsubw_wu_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhsubw_du_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhsubw_du_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmod_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmod_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmod_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmod_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmod_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmod_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmod_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmod_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmod_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmod_bu(transmute(a), transmute(b))) } } 
-#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmod_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmod_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmod_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmod_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmod_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmod_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3414,7 +3414,7 @@ pub fn lasx_xvrepl128vei_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvrepl128vei_b(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3423,7 +3423,7 @@ pub fn lasx_xvrepl128vei_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvrepl128vei_h(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3432,7 +3432,7 @@ pub fn lasx_xvrepl128vei_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvrepl128vei_w(transmute(a), IMM2)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3441,210 +3441,210 @@ pub fn lasx_xvrepl128vei_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvrepl128vei_d(transmute(a), IMM1)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpickev_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpickev_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpickev_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpickev_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpickev_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpickev_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpickev_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpickev_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpickod_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpickod_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpickod_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpickod_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpickod_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpickod_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpickod_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpickod_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvilvh_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvilvh_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvilvh_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvilvh_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvilvh_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvilvh_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvilvh_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvilvh_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvilvl_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvilvl_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvilvl_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvilvl_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvilvl_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvilvl_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvilvl_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvilvl_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lasx_xvpackev_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpackev_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpackev_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpackev_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpackev_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpackev_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpackev_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpackev_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpackod_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpackod_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpackod_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpackod_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpackod_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpackod_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpackod_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpackod_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lasx_xvshuf_b(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvshuf_b(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvshuf_h(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvshuf_h(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvshuf_w(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvshuf_w(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvshuf_d(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvshuf_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvand_v(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvand_v(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3653,14 +3653,14 @@ pub fn lasx_xvandi_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvandi_b(transmute(a), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvor_v(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvor_v(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3669,14 +3669,14 @@ pub fn lasx_xvori_b(a: m256i) -> m256i { unsafe { 
transmute(__lasx_xvori_b(transmute(a), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvnor_v(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvnor_v(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3685,14 +3685,14 @@ pub fn lasx_xvnori_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvnori_b(transmute(a), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvxor_v(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvxor_v(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3701,14 +3701,14 @@ pub fn lasx_xvxori_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvxori_b(transmute(a), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitsel_v(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvbitsel_v(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3717,7 +3717,7 @@ pub fn lasx_xvbitseli_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitseli_b(transmute(a), transmute(b), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3726,7 +3726,7 @@ pub fn lasx_xvshuf4i_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvshuf4i_b(transmute(a), IMM8)) } } -#[inline] 
+#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3735,7 +3735,7 @@ pub fn lasx_xvshuf4i_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvshuf4i_h(transmute(a), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3744,497 +3744,497 @@ pub fn lasx_xvshuf4i_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvshuf4i_w(transmute(a), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplgr2vr_b(a: i32) -> m256i { unsafe { transmute(__lasx_xvreplgr2vr_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplgr2vr_h(a: i32) -> m256i { unsafe { transmute(__lasx_xvreplgr2vr_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplgr2vr_w(a: i32) -> m256i { unsafe { transmute(__lasx_xvreplgr2vr_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplgr2vr_d(a: i64) -> m256i { unsafe { transmute(__lasx_xvreplgr2vr_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpcnt_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvpcnt_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpcnt_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvpcnt_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpcnt_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvpcnt_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpcnt_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvpcnt_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvclo_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvclo_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvclo_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvclo_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvclo_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvclo_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvclo_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvclo_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvclz_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvclz_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvclz_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvclz_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvclz_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvclz_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvclz_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvclz_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfadd_s(a: m256, b: m256) -> m256 { unsafe { transmute(__lasx_xvfadd_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfadd_d(a: m256d, b: m256d) -> m256d { unsafe { transmute(__lasx_xvfadd_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfsub_s(a: m256, b: m256) -> m256 { unsafe { transmute(__lasx_xvfsub_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfsub_d(a: m256d, b: m256d) -> m256d { unsafe { transmute(__lasx_xvfsub_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmul_s(a: m256, b: m256) -> m256 { unsafe { transmute(__lasx_xvfmul_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmul_d(a: m256d, b: m256d) -> m256d { unsafe { transmute(__lasx_xvfmul_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfdiv_s(a: m256, b: m256) -> m256 { unsafe { transmute(__lasx_xvfdiv_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lasx_xvfdiv_d(a: m256d, b: m256d) -> m256d { unsafe { transmute(__lasx_xvfdiv_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcvt_h_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcvt_h_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcvt_s_d(a: m256d, b: m256d) -> m256 { unsafe { transmute(__lasx_xvfcvt_s_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmin_s(a: m256, b: m256) -> m256 { unsafe { transmute(__lasx_xvfmin_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmin_d(a: m256d, b: m256d) -> m256d { unsafe { transmute(__lasx_xvfmin_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmina_s(a: m256, b: m256) -> m256 { unsafe { transmute(__lasx_xvfmina_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmina_d(a: m256d, b: m256d) -> m256d { unsafe { transmute(__lasx_xvfmina_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmax_s(a: m256, b: m256) -> m256 { unsafe { transmute(__lasx_xvfmax_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmax_d(a: m256d, b: 
m256d) -> m256d { unsafe { transmute(__lasx_xvfmax_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmaxa_s(a: m256, b: m256) -> m256 { unsafe { transmute(__lasx_xvfmaxa_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmaxa_d(a: m256d, b: m256d) -> m256d { unsafe { transmute(__lasx_xvfmaxa_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfclass_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvfclass_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfclass_d(a: m256d) -> m256i { unsafe { transmute(__lasx_xvfclass_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfsqrt_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfsqrt_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfsqrt_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfsqrt_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrecip_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrecip_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrecip_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfrecip_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx,frecipe")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrecipe_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrecipe_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrecipe_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfrecipe_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrsqrte_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrsqrte_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrsqrte_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfrsqrte_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrint_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrint_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrint_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfrint_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrsqrt_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrsqrt_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrsqrt_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfrsqrt_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvflogb_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvflogb_s(transmute(a))) } } -#[inline] 
+#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvflogb_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvflogb_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcvth_s_h(a: m256i) -> m256 { unsafe { transmute(__lasx_xvfcvth_s_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcvth_d_s(a: m256) -> m256d { unsafe { transmute(__lasx_xvfcvth_d_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcvtl_s_h(a: m256i) -> m256 { unsafe { transmute(__lasx_xvfcvtl_s_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcvtl_d_s(a: m256) -> m256d { unsafe { transmute(__lasx_xvfcvtl_d_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftint_w_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftint_w_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftint_l_d(a: m256d) -> m256i { unsafe { transmute(__lasx_xvftint_l_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftint_wu_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftint_wu_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftint_lu_d(a: m256d) -> m256i { unsafe { 
transmute(__lasx_xvftint_lu_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrz_w_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrz_w_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrz_l_d(a: m256d) -> m256i { unsafe { transmute(__lasx_xvftintrz_l_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrz_wu_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrz_wu_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrz_lu_d(a: m256d) -> m256i { unsafe { transmute(__lasx_xvftintrz_lu_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvffint_s_w(a: m256i) -> m256 { unsafe { transmute(__lasx_xvffint_s_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvffint_d_l(a: m256i) -> m256d { unsafe { transmute(__lasx_xvffint_d_l(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvffint_s_wu(a: m256i) -> m256 { unsafe { transmute(__lasx_xvffint_s_wu(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvffint_d_lu(a: m256i) -> m256d { unsafe { transmute(__lasx_xvffint_d_lu(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", 
issue = "117427")] pub fn lasx_xvreplve_b(a: m256i, b: i32) -> m256i { unsafe { transmute(__lasx_xvreplve_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplve_h(a: m256i, b: i32) -> m256i { unsafe { transmute(__lasx_xvreplve_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplve_w(a: m256i, b: i32) -> m256i { unsafe { transmute(__lasx_xvreplve_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplve_d(a: m256i, b: i32) -> m256i { unsafe { transmute(__lasx_xvreplve_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4243,98 +4243,98 @@ pub fn lasx_xvpermi_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpermi_w(transmute(a), transmute(b), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvandn_v(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvandn_v(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvneg_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvneg_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvneg_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvneg_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] pub fn lasx_xvneg_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvneg_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvneg_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvneg_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmuh_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmuh_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmuh_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmuh_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmuh_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmuh_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmuh_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmuh_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmuh_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmuh_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmuh_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmuh_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmuh_wu(a: m256i, b: m256i) -> m256i { unsafe { 
transmute(__lasx_xvmuh_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmuh_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmuh_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4343,7 +4343,7 @@ pub fn lasx_xvsllwil_h_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsllwil_h_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4352,7 +4352,7 @@ pub fn lasx_xvsllwil_w_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsllwil_w_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4361,7 +4361,7 @@ pub fn lasx_xvsllwil_d_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsllwil_d_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4370,7 +4370,7 @@ pub fn lasx_xvsllwil_hu_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsllwil_hu_bu(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4379,7 +4379,7 @@ pub fn lasx_xvsllwil_wu_hu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsllwil_wu_hu(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4388,217 +4388,217 @@ pub fn lasx_xvsllwil_du_wu(a: m256i) -> 
m256i { unsafe { transmute(__lasx_xvsllwil_du_wu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsran_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsran_b_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsran_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsran_h_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsran_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsran_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssran_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssran_b_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssran_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssran_h_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssran_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssran_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssran_bu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssran_bu_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssran_hu_w(a: m256i, b: 
m256i) -> m256i { unsafe { transmute(__lasx_xvssran_hu_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssran_wu_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssran_wu_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrarn_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrarn_b_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrarn_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrarn_h_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrarn_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrarn_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrarn_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarn_b_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrarn_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarn_h_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrarn_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarn_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lasx_xvssrarn_bu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarn_bu_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrarn_hu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarn_hu_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrarn_wu_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarn_wu_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrln_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrln_b_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrln_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrln_h_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrln_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrln_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrln_bu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrln_bu_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrln_hu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrln_hu_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", 
issue = "117427")] pub fn lasx_xvssrln_wu_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrln_wu_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrlrn_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlrn_b_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrlrn_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlrn_h_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrlrn_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlrn_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrlrn_bu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrn_bu_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrlrn_hu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrn_hu_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrlrn_wu_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrn_wu_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4607,7 +4607,7 @@ pub fn lasx_xvfrstpi_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvfrstpi_b(transmute(a), transmute(b), IMM5)) } } -#[inline] 
+#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4616,21 +4616,21 @@ pub fn lasx_xvfrstpi_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvfrstpi_h(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrstp_b(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvfrstp_b(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrstp_h(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvfrstp_h(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4639,7 +4639,7 @@ pub fn lasx_xvshuf4i_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvshuf4i_d(transmute(a), transmute(b), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4648,7 +4648,7 @@ pub fn lasx_xvbsrl_v(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbsrl_v(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4657,7 +4657,7 @@ pub fn lasx_xvbsll_v(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbsll_v(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4666,7 +4666,7 @@ pub fn lasx_xvextrins_b(a: m256i, b: m256i) -> m256i { unsafe { 
transmute(__lasx_xvextrins_b(transmute(a), transmute(b), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4675,7 +4675,7 @@ pub fn lasx_xvextrins_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvextrins_h(transmute(a), transmute(b), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4684,7 +4684,7 @@ pub fn lasx_xvextrins_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvextrins_w(transmute(a), transmute(b), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4693,343 +4693,343 @@ pub fn lasx_xvextrins_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvextrins_d(transmute(a), transmute(b), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmskltz_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmskltz_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmskltz_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmskltz_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmskltz_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmskltz_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmskltz_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmskltz_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsigncov_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsigncov_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsigncov_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsigncov_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsigncov_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsigncov_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsigncov_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsigncov_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmadd_s(a: m256, b: m256, c: m256) -> m256 { unsafe { transmute(__lasx_xvfmadd_s(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmadd_d(a: m256d, b: m256d, c: m256d) -> m256d { unsafe { transmute(__lasx_xvfmadd_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmsub_s(a: m256, b: m256, c: m256) -> m256 { unsafe { transmute(__lasx_xvfmsub_s(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmsub_d(a: m256d, b: m256d, c: m256d) -> m256d { unsafe { transmute(__lasx_xvfmsub_d(transmute(a), 
transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfnmadd_s(a: m256, b: m256, c: m256) -> m256 { unsafe { transmute(__lasx_xvfnmadd_s(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfnmadd_d(a: m256d, b: m256d, c: m256d) -> m256d { unsafe { transmute(__lasx_xvfnmadd_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfnmsub_s(a: m256, b: m256, c: m256) -> m256 { unsafe { transmute(__lasx_xvfnmsub_s(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfnmsub_d(a: m256d, b: m256d, c: m256d) -> m256d { unsafe { transmute(__lasx_xvfnmsub_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrne_w_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrne_w_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrne_l_d(a: m256d) -> m256i { unsafe { transmute(__lasx_xvftintrne_l_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrp_w_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrp_w_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrp_l_d(a: m256d) -> m256i { unsafe 
{ transmute(__lasx_xvftintrp_l_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrm_w_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrm_w_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrm_l_d(a: m256d) -> m256i { unsafe { transmute(__lasx_xvftintrm_l_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftint_w_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvftint_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvffint_s_l(a: m256i, b: m256i) -> m256 { unsafe { transmute(__lasx_xvffint_s_l(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrz_w_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvftintrz_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrp_w_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvftintrp_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrm_w_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvftintrm_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrne_w_d(a: m256d, b: m256d) -> m256i { unsafe { 
transmute(__lasx_xvftintrne_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftinth_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftinth_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintl_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintl_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvffinth_d_w(a: m256i) -> m256d { unsafe { transmute(__lasx_xvffinth_d_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvffintl_d_w(a: m256i) -> m256d { unsafe { transmute(__lasx_xvffintl_d_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrzh_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrzh_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrzl_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrzl_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrph_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrph_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrpl_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrpl_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature 
= "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrmh_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrmh_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrml_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrml_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrneh_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrneh_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrnel_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrnel_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrintrne_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrintrne_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrintrne_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfrintrne_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrintrz_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrintrz_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrintrz_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfrintrz_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrintrp_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrintrp_s(transmute(a))) } } 
-#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrintrp_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfrintrp_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrintrm_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrintrm_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrintrm_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfrintrm_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5038,7 +5038,7 @@ pub unsafe fn lasx_xvld(mem_addr: *const i8) -> m256i { transmute(__lasx_xvld(mem_addr, IMM_S12)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5047,7 +5047,7 @@ pub unsafe fn lasx_xvst(a: m256i, mem_addr: *mut i8) { transmute(__lasx_xvst(transmute(a), mem_addr, IMM_S12)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2, 3)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5057,7 +5057,7 @@ pub unsafe fn lasx_xvstelm_b(a: m256i, mem_a transmute(__lasx_xvstelm_b(transmute(a), mem_addr, IMM_S8, IMM4)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2, 3)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5067,7 +5067,7 @@ pub unsafe fn lasx_xvstelm_h(a: m256i, mem_a transmute(__lasx_xvstelm_h(transmute(a), mem_addr, IMM_S8, IMM3)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2, 3)] #[unstable(feature = "stdarch_loongarch", 
issue = "117427")] @@ -5077,7 +5077,7 @@ pub unsafe fn lasx_xvstelm_w(a: m256i, mem_a transmute(__lasx_xvstelm_w(transmute(a), mem_addr, IMM_S8, IMM2)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2, 3)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5087,7 +5087,7 @@ pub unsafe fn lasx_xvstelm_d(a: m256i, mem_a transmute(__lasx_xvstelm_d(transmute(a), mem_addr, IMM_S8, IMM1)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5096,7 +5096,7 @@ pub fn lasx_xvinsve0_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvinsve0_w(transmute(a), transmute(b), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5105,7 +5105,7 @@ pub fn lasx_xvinsve0_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvinsve0_d(transmute(a), transmute(b), IMM2)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5114,7 +5114,7 @@ pub fn lasx_xvpickve_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvpickve_w(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5123,56 +5123,56 @@ pub fn lasx_xvpickve_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvpickve_d(transmute(a), IMM2)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrlrn_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrn_b_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrlrn_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrn_h_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrlrn_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrn_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrln_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrln_b_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrln_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrln_h_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrln_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrln_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvorn_v(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvorn_v(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5181,28 +5181,28 @@ pub fn lasx_xvldi() -> m256i { unsafe { transmute(__lasx_xvldi(IMM_S13)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn lasx_xvldx(mem_addr: *const i8, b: i64) -> m256i { transmute(__lasx_xvldx(mem_addr, transmute(b))) } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn lasx_xvstx(a: m256i, mem_addr: *mut i8, b: i64) { transmute(__lasx_xvstx(transmute(a), mem_addr, transmute(b))) } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvextl_qu_du(a: m256i) -> m256i { unsafe { transmute(__lasx_xvextl_qu_du(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5211,7 +5211,7 @@ pub fn lasx_xvinsgr2vr_w(a: m256i, b: i32) -> m256i { unsafe { transmute(__lasx_xvinsgr2vr_w(transmute(a), transmute(b), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5220,126 +5220,126 @@ pub fn lasx_xvinsgr2vr_d(a: m256i, b: i64) -> m256i { unsafe { transmute(__lasx_xvinsgr2vr_d(transmute(a), transmute(b), IMM2)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplve0_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvreplve0_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplve0_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvreplve0_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplve0_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvreplve0_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplve0_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvreplve0_d(transmute(a))) } } -#[inline] +#[inline(always)] 
#[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplve0_q(a: m256i) -> m256i { unsafe { transmute(__lasx_xvreplve0_q(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_h_b(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_h_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_w_h(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_w_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_d_w(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_d_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_w_b(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_w_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_d_h(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_d_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_d_b(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_d_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_hu_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_hu_bu(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_wu_hu(a: m256i) -> m256i { unsafe { 
transmute(__lasx_vext2xv_wu_hu(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_du_wu(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_du_wu(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_wu_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_wu_bu(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_du_hu(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_du_hu(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_du_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_du_bu(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5348,7 +5348,7 @@ pub fn lasx_xvpermi_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpermi_q(transmute(a), transmute(b), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5357,14 +5357,14 @@ pub fn lasx_xvpermi_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvpermi_d(transmute(a), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvperm_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvperm_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5373,7 
+5373,7 @@ pub unsafe fn lasx_xvldrepl_b(mem_addr: *const i8) -> m256i transmute(__lasx_xvldrepl_b(mem_addr, IMM_S12)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5382,7 +5382,7 @@ pub unsafe fn lasx_xvldrepl_h(mem_addr: *const i8) -> m256i transmute(__lasx_xvldrepl_h(mem_addr, IMM_S11)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5391,7 +5391,7 @@ pub unsafe fn lasx_xvldrepl_w(mem_addr: *const i8) -> m256i transmute(__lasx_xvldrepl_w(mem_addr, IMM_S10)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5400,7 +5400,7 @@ pub unsafe fn lasx_xvldrepl_d(mem_addr: *const i8) -> m256i { transmute(__lasx_xvldrepl_d(mem_addr, IMM_S9)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5409,7 +5409,7 @@ pub fn lasx_xvpickve2gr_w(a: m256i) -> i32 { unsafe { transmute(__lasx_xvpickve2gr_w(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5418,7 +5418,7 @@ pub fn lasx_xvpickve2gr_wu(a: m256i) -> u32 { unsafe { transmute(__lasx_xvpickve2gr_wu(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5427,7 +5427,7 @@ pub fn lasx_xvpickve2gr_d(a: m256i) -> i64 { unsafe { transmute(__lasx_xvpickve2gr_d(transmute(a), IMM2)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] @@ -5436,763 +5436,763 @@ pub fn lasx_xvpickve2gr_du(a: m256i) -> u64 { unsafe { transmute(__lasx_xvpickve2gr_du(transmute(a), IMM2)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_q_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_q_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_d_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_d_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_w_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_w_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_h_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_h_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_q_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_q_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_d_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_d_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_w_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_w_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] 
#[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_h_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_h_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwev_q_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwev_q_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwev_d_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwev_d_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwev_w_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwev_w_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwev_h_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwev_h_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwev_q_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwev_q_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwev_d_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwev_d_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwev_w_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwev_w_hu(transmute(a), 
transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwev_h_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwev_h_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_q_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_q_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_d_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_d_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_w_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_w_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_h_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_h_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_q_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_q_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_d_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_d_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_w_hu(a: m256i, b: m256i) -> m256i { unsafe { 
transmute(__lasx_xvmulwev_w_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_h_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_h_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_q_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_q_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_d_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_d_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_w_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_w_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_h_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_h_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_q_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_q_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_d_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_d_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lasx_xvaddwod_w_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_w_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_h_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_h_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwod_q_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwod_q_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwod_d_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwod_d_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwod_w_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwod_w_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwod_h_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwod_h_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwod_q_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwod_q_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwod_d_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwod_d_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwod_w_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwod_w_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwod_h_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwod_h_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_q_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_q_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_d_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_d_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_w_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_w_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_h_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_h_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_q_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_q_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_d_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_d_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] 
#[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_w_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_w_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_h_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_h_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_d_wu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_d_wu_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_w_hu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_w_hu_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_h_bu_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_h_bu_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_d_wu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_d_wu_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_w_hu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_w_hu_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_h_bu_b(a: m256i, b: m256i) -> m256i { unsafe { 
transmute(__lasx_xvmulwev_h_bu_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_d_wu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_d_wu_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_w_hu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_w_hu_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_h_bu_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_h_bu_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_d_wu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_d_wu_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_w_hu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_w_hu_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_h_bu_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_h_bu_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhaddw_q_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhaddw_q_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] pub fn lasx_xvhaddw_qu_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhaddw_qu_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhsubw_q_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhsubw_q_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhsubw_qu_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhsubw_qu_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_q_d(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_q_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_d_w(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_d_w(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_w_h(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_w_h(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_h_b(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_h_b(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_q_du(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { 
transmute(__lasx_xvmaddwev_q_du(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_d_wu(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_d_wu(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_w_hu(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_w_hu(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_h_bu(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_h_bu(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_q_d(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_q_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_d_w(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_d_w(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_w_h(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_w_h(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_h_b(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { 
transmute(__lasx_xvmaddwod_h_b(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_q_du(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_q_du(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_d_wu(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_d_wu(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_w_hu(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_w_hu(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_h_bu(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_h_bu(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_q_du_d(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_q_du_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_d_wu_w(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_d_wu_w(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_w_hu_h(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { 
transmute(__lasx_xvmaddwev_w_hu_h(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_h_bu_b(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_h_bu_b(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_q_du_d(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_q_du_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_d_wu_w(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_d_wu_w(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_w_hu_h(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_w_hu_h(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_h_bu_b(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_h_bu_b(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvrotr_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvrotr_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvrotr_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvrotr_h(transmute(a), transmute(b))) 
} } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvrotr_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvrotr_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvrotr_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvrotr_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvadd_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvadd_q(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsub_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsub_q(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_q_du_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_q_du_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_q_du_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_q_du_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_q_du_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_q_du_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_q_du_d(a: m256i, b: m256i) -> m256i { unsafe { 
transmute(__lasx_xvmulwod_q_du_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmskgez_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmskgez_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmsknz_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmsknz_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvexth_h_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvexth_h_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvexth_w_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvexth_w_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvexth_d_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvexth_d_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvexth_q_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvexth_q_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvexth_hu_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvexth_hu_bu(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvexth_wu_hu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvexth_wu_hu(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] pub fn lasx_xvexth_du_wu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvexth_du_wu(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvexth_qu_du(a: m256i) -> m256i { unsafe { transmute(__lasx_xvexth_qu_du(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6201,7 +6201,7 @@ pub fn lasx_xvrotri_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvrotri_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6210,7 +6210,7 @@ pub fn lasx_xvrotri_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvrotri_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6219,7 +6219,7 @@ pub fn lasx_xvrotri_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvrotri_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6228,14 +6228,14 @@ pub fn lasx_xvrotri_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvrotri_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvextl_q_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvextl_q_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6244,7 +6244,7 @@ pub fn lasx_xvsrlni_b_h(a: m256i, b: m256i) -> m256i { unsafe { 
transmute(__lasx_xvsrlni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6253,7 +6253,7 @@ pub fn lasx_xvsrlni_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6262,7 +6262,7 @@ pub fn lasx_xvsrlni_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6271,7 +6271,7 @@ pub fn lasx_xvsrlni_d_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6280,7 +6280,7 @@ pub fn lasx_xvsrlrni_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlrni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6289,7 +6289,7 @@ pub fn lasx_xvsrlrni_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlrni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6298,7 +6298,7 @@ pub fn lasx_xvsrlrni_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlrni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] 
#[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6307,7 +6307,7 @@ pub fn lasx_xvsrlrni_d_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlrni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6316,7 +6316,7 @@ pub fn lasx_xvssrlni_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6325,7 +6325,7 @@ pub fn lasx_xvssrlni_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6334,7 +6334,7 @@ pub fn lasx_xvssrlni_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6343,7 +6343,7 @@ pub fn lasx_xvssrlni_d_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6352,7 +6352,7 @@ pub fn lasx_xvssrlni_bu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlni_bu_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] @@ -6361,7 +6361,7 @@ pub fn lasx_xvssrlni_hu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlni_hu_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6370,7 +6370,7 @@ pub fn lasx_xvssrlni_wu_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlni_wu_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6379,7 +6379,7 @@ pub fn lasx_xvssrlni_du_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlni_du_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6388,7 +6388,7 @@ pub fn lasx_xvssrlrni_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6397,7 +6397,7 @@ pub fn lasx_xvssrlrni_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6406,7 +6406,7 @@ pub fn lasx_xvssrlrni_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6415,7 +6415,7 @@ pub fn lasx_xvssrlrni_d_q(a: 
m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6424,7 +6424,7 @@ pub fn lasx_xvssrlrni_bu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrni_bu_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6433,7 +6433,7 @@ pub fn lasx_xvssrlrni_hu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrni_hu_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6442,7 +6442,7 @@ pub fn lasx_xvssrlrni_wu_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrni_wu_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6451,7 +6451,7 @@ pub fn lasx_xvssrlrni_du_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrni_du_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6460,7 +6460,7 @@ pub fn lasx_xvsrani_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrani_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6469,7 +6469,7 @@ pub fn lasx_xvsrani_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrani_h_w(transmute(a), 
transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6478,7 +6478,7 @@ pub fn lasx_xvsrani_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrani_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6487,7 +6487,7 @@ pub fn lasx_xvsrani_d_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrani_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6496,7 +6496,7 @@ pub fn lasx_xvsrarni_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrarni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6505,7 +6505,7 @@ pub fn lasx_xvsrarni_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrarni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6514,7 +6514,7 @@ pub fn lasx_xvsrarni_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrarni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6523,7 +6523,7 @@ pub fn lasx_xvsrarni_d_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrarni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] 
#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6532,7 +6532,7 @@ pub fn lasx_xvssrani_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrani_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6541,7 +6541,7 @@ pub fn lasx_xvssrani_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrani_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6550,7 +6550,7 @@ pub fn lasx_xvssrani_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrani_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6559,7 +6559,7 @@ pub fn lasx_xvssrani_d_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrani_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6568,7 +6568,7 @@ pub fn lasx_xvssrani_bu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrani_bu_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6577,7 +6577,7 @@ pub fn lasx_xvssrani_hu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrani_hu_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ 
-6586,7 +6586,7 @@ pub fn lasx_xvssrani_wu_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrani_wu_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6595,7 +6595,7 @@ pub fn lasx_xvssrani_du_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrani_du_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6604,7 +6604,7 @@ pub fn lasx_xvssrarni_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6613,7 +6613,7 @@ pub fn lasx_xvssrarni_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6622,7 +6622,7 @@ pub fn lasx_xvssrarni_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6631,7 +6631,7 @@ pub fn lasx_xvssrarni_d_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6640,7 +6640,7 @@ pub fn lasx_xvssrarni_bu_h(a: m256i, b: m256i) -> m256i { unsafe { 
transmute(__lasx_xvssrarni_bu_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6649,7 +6649,7 @@ pub fn lasx_xvssrarni_hu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarni_hu_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6658,7 +6658,7 @@ pub fn lasx_xvssrarni_wu_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarni_wu_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6667,385 +6667,385 @@ pub fn lasx_xvssrarni_du_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarni_du_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbnz_b(a: m256i) -> i32 { unsafe { transmute(__lasx_xbnz_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbnz_d(a: m256i) -> i32 { unsafe { transmute(__lasx_xbnz_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbnz_h(a: m256i) -> i32 { unsafe { transmute(__lasx_xbnz_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbnz_v(a: m256i) -> i32 { unsafe { transmute(__lasx_xbnz_v(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lasx_xbnz_w(a: m256i) -> i32 { unsafe { transmute(__lasx_xbnz_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbz_b(a: m256i) -> i32 { unsafe { transmute(__lasx_xbz_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbz_d(a: m256i) -> i32 { unsafe { transmute(__lasx_xbz_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbz_h(a: m256i) -> i32 { unsafe { transmute(__lasx_xbz_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbz_v(a: m256i) -> i32 { unsafe { transmute(__lasx_xbz_v(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbz_w(a: m256i) -> i32 { unsafe { transmute(__lasx_xbz_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_caf_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_caf_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_caf_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_caf_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_ceq_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_ceq_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] 
#[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_ceq_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_ceq_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cle_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_cle_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cle_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_cle_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_clt_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_clt_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_clt_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_clt_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cne_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_cne_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cne_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_cne_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cor_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_cor_d(transmute(a), transmute(b))) } } 
-#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cor_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_cor_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cueq_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_cueq_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cueq_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_cueq_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cule_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_cule_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cule_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_cule_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cult_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_cult_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cult_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_cult_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cun_d(a: m256d, b: m256d) -> m256i { unsafe { 
transmute(__lasx_xvfcmp_cun_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cune_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_cune_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cune_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_cune_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cun_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_cun_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_saf_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_saf_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_saf_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_saf_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_seq_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_seq_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_seq_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_seq_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sle_d(a: m256d, 
b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_sle_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sle_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_sle_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_slt_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_slt_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_slt_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_slt_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sne_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_sne_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sne_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_sne_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sor_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_sor_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sor_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_sor_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lasx_xvfcmp_sueq_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_sueq_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sueq_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_sueq_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sule_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_sule_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sule_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_sule_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sult_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_sult_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sult_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_sult_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sun_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_sun_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sune_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_sune_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sune_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_sune_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sun_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_sun_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -7054,7 +7054,7 @@ pub fn lasx_xvpickve_d_f(a: m256d) -> m256d { unsafe { transmute(__lasx_xvpickve_d_f(transmute(a), IMM2)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -7063,7 +7063,7 @@ pub fn lasx_xvpickve_w_f(a: m256) -> m256 { unsafe { transmute(__lasx_xvpickve_w_f(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -7072,7 +7072,7 @@ pub fn lasx_xvrepli_b() -> m256i { unsafe { transmute(__lasx_xvrepli_b(IMM_S10)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -7081,7 +7081,7 @@ pub fn lasx_xvrepli_d() -> m256i { unsafe { transmute(__lasx_xvrepli_d(IMM_S10)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -7090,7 +7090,7 @@ pub fn lasx_xvrepli_h() -> m256i { unsafe { transmute(__lasx_xvrepli_h(IMM_S10)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -7099,126 +7099,126 @@ pub fn 
lasx_xvrepli_w() -> m256i { unsafe { transmute(__lasx_xvrepli_w(IMM_S10)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_cast_128_s(a: m128) -> m256 { unsafe { transmute(__lasx_cast_128_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_cast_128_d(a: m128d) -> m256d { unsafe { transmute(__lasx_cast_128_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_cast_128(a: m128i) -> m256i { unsafe { transmute(__lasx_cast_128(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_concat_128_s(a: m128, b: m128) -> m256 { unsafe { transmute(__lasx_concat_128_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_concat_128_d(a: m128d, b: m128d) -> m256d { unsafe { transmute(__lasx_concat_128_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_concat_128(a: m128i, b: m128i) -> m256i { unsafe { transmute(__lasx_concat_128(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_extract_128_lo_s(a: m256) -> m128 { unsafe { transmute(__lasx_extract_128_lo_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_extract_128_hi_s(a: m256) -> m128 { unsafe { transmute(__lasx_extract_128_hi_s(transmute(a))) } } -#[inline] 
+#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_extract_128_lo_d(a: m256d) -> m128d { unsafe { transmute(__lasx_extract_128_lo_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_extract_128_hi_d(a: m256d) -> m128d { unsafe { transmute(__lasx_extract_128_hi_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_extract_128_lo(a: m256i) -> m128i { unsafe { transmute(__lasx_extract_128_lo(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_extract_128_hi(a: m256i) -> m128i { unsafe { transmute(__lasx_extract_128_hi(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_insert_128_lo_s(a: m256, b: m128) -> m256 { unsafe { transmute(__lasx_insert_128_lo_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_insert_128_hi_s(a: m256, b: m128) -> m256 { unsafe { transmute(__lasx_insert_128_hi_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_insert_128_lo_d(a: m256d, b: m128d) -> m256d { unsafe { transmute(__lasx_insert_128_lo_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_insert_128_hi_d(a: m256d, b: m128d) -> m256d { unsafe { transmute(__lasx_insert_128_hi_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] 
#[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_insert_128_lo(a: m256i, b: m128i) -> m256i { unsafe { transmute(__lasx_insert_128_lo(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_insert_128_hi(a: m256i, b: m128i) -> m256i { diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs index 25efaadb42880..679c82079cb88 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs @@ -1453,35 +1453,35 @@ unsafe extern "unadjusted" { fn __lsx_vrepli_w(a: i32) -> __v4i32; } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsll_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsll_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsll_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsll_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsll_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsll_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsll_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsll_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1490,7 +1490,7 @@ pub fn lsx_vslli_b(a: 
m128i) -> m128i { unsafe { transmute(__lsx_vslli_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1499,7 +1499,7 @@ pub fn lsx_vslli_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vslli_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1508,7 +1508,7 @@ pub fn lsx_vslli_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vslli_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1517,35 +1517,35 @@ pub fn lsx_vslli_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vslli_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsra_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsra_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsra_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsra_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsra_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsra_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsra_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsra_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] @@ -1554,7 +1554,7 @@ pub fn lsx_vsrai_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrai_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1563,7 +1563,7 @@ pub fn lsx_vsrai_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrai_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1572,7 +1572,7 @@ pub fn lsx_vsrai_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrai_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1581,35 +1581,35 @@ pub fn lsx_vsrai_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrai_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrar_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrar_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrar_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrar_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrar_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrar_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrar_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrar_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] 
#[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1618,7 +1618,7 @@ pub fn lsx_vsrari_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrari_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1627,7 +1627,7 @@ pub fn lsx_vsrari_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrari_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1636,7 +1636,7 @@ pub fn lsx_vsrari_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrari_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1645,35 +1645,35 @@ pub fn lsx_vsrari_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrari_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrl_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrl_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrl_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrl_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrl_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrl_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrl_d(a: m128i, b: m128i) -> m128i { unsafe { 
transmute(__lsx_vsrl_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1682,7 +1682,7 @@ pub fn lsx_vsrli_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrli_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1691,7 +1691,7 @@ pub fn lsx_vsrli_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrli_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1700,7 +1700,7 @@ pub fn lsx_vsrli_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrli_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1709,35 +1709,35 @@ pub fn lsx_vsrli_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrli_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrlr_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlr_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrlr_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlr_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrlr_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlr_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lsx_vsrlr_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlr_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1746,7 +1746,7 @@ pub fn lsx_vsrlri_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrlri_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1755,7 +1755,7 @@ pub fn lsx_vsrlri_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrlri_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1764,7 +1764,7 @@ pub fn lsx_vsrlri_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrlri_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1773,35 +1773,35 @@ pub fn lsx_vsrlri_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrlri_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitclr_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitclr_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitclr_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitclr_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitclr_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitclr_w(transmute(a), 
transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitclr_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitclr_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1810,7 +1810,7 @@ pub fn lsx_vbitclri_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitclri_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1819,7 +1819,7 @@ pub fn lsx_vbitclri_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitclri_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1828,7 +1828,7 @@ pub fn lsx_vbitclri_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitclri_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1837,35 +1837,35 @@ pub fn lsx_vbitclri_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitclri_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitset_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitset_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitset_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitset_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lsx_vbitset_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitset_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitset_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitset_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1874,7 +1874,7 @@ pub fn lsx_vbitseti_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitseti_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1883,7 +1883,7 @@ pub fn lsx_vbitseti_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitseti_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1892,7 +1892,7 @@ pub fn lsx_vbitseti_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitseti_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1901,35 +1901,35 @@ pub fn lsx_vbitseti_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitseti_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitrev_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitrev_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitrev_h(a: m128i, b: m128i) -> m128i { unsafe { 
transmute(__lsx_vbitrev_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitrev_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitrev_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitrev_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitrev_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1938,7 +1938,7 @@ pub fn lsx_vbitrevi_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitrevi_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1947,7 +1947,7 @@ pub fn lsx_vbitrevi_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitrevi_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1956,7 +1956,7 @@ pub fn lsx_vbitrevi_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitrevi_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1965,35 +1965,35 @@ pub fn lsx_vbitrevi_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitrevi_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vadd_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vadd_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vadd_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vadd_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vadd_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vadd_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vadd_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vadd_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2002,7 +2002,7 @@ pub fn lsx_vaddi_bu(a: m128i) -> m128i { unsafe { transmute(__lsx_vaddi_bu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2011,7 +2011,7 @@ pub fn lsx_vaddi_hu(a: m128i) -> m128i { unsafe { transmute(__lsx_vaddi_hu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2020,7 +2020,7 @@ pub fn lsx_vaddi_wu(a: m128i) -> m128i { unsafe { transmute(__lsx_vaddi_wu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2029,35 +2029,35 @@ pub fn lsx_vaddi_du(a: m128i) -> m128i { unsafe { transmute(__lsx_vaddi_du(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsub_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsub_b(transmute(a), 
transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsub_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsub_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsub_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsub_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsub_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsub_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2066,7 +2066,7 @@ pub fn lsx_vsubi_bu(a: m128i) -> m128i { unsafe { transmute(__lsx_vsubi_bu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2075,7 +2075,7 @@ pub fn lsx_vsubi_hu(a: m128i) -> m128i { unsafe { transmute(__lsx_vsubi_hu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2084,7 +2084,7 @@ pub fn lsx_vsubi_wu(a: m128i) -> m128i { unsafe { transmute(__lsx_vsubi_wu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2093,35 +2093,35 @@ pub fn lsx_vsubi_du(a: m128i) -> m128i { unsafe { transmute(__lsx_vsubi_du(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lsx_vmax_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmax_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmax_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmax_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmax_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmax_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmax_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmax_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2130,7 +2130,7 @@ pub fn lsx_vmaxi_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vmaxi_b(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2139,7 +2139,7 @@ pub fn lsx_vmaxi_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vmaxi_h(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2148,7 +2148,7 @@ pub fn lsx_vmaxi_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vmaxi_w(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2157,35 +2157,35 @@ pub fn lsx_vmaxi_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vmaxi_d(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] 
#[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmax_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmax_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmax_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmax_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmax_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmax_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmax_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmax_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2194,7 +2194,7 @@ pub fn lsx_vmaxi_bu(a: m128i) -> m128i { unsafe { transmute(__lsx_vmaxi_bu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2203,7 +2203,7 @@ pub fn lsx_vmaxi_hu(a: m128i) -> m128i { unsafe { transmute(__lsx_vmaxi_hu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2212,7 +2212,7 @@ pub fn lsx_vmaxi_wu(a: m128i) -> m128i { unsafe { transmute(__lsx_vmaxi_wu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2221,35 +2221,35 @@ pub fn lsx_vmaxi_du(a: m128i) -> m128i 
{ unsafe { transmute(__lsx_vmaxi_du(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmin_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmin_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmin_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmin_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmin_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmin_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmin_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmin_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2258,7 +2258,7 @@ pub fn lsx_vmini_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vmini_b(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2267,7 +2267,7 @@ pub fn lsx_vmini_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vmini_h(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2276,7 +2276,7 @@ pub fn lsx_vmini_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vmini_w(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", 
issue = "117427")] @@ -2285,35 +2285,35 @@ pub fn lsx_vmini_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vmini_d(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmin_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmin_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmin_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmin_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmin_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmin_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmin_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmin_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2322,7 +2322,7 @@ pub fn lsx_vmini_bu(a: m128i) -> m128i { unsafe { transmute(__lsx_vmini_bu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2331,7 +2331,7 @@ pub fn lsx_vmini_hu(a: m128i) -> m128i { unsafe { transmute(__lsx_vmini_hu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2340,7 +2340,7 @@ pub fn lsx_vmini_wu(a: m128i) -> m128i { unsafe { transmute(__lsx_vmini_wu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] 
#[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2349,35 +2349,35 @@ pub fn lsx_vmini_du(a: m128i) -> m128i { unsafe { transmute(__lsx_vmini_du(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vseq_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vseq_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vseq_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vseq_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vseq_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vseq_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vseq_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vseq_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2386,7 +2386,7 @@ pub fn lsx_vseqi_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vseqi_b(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2395,7 +2395,7 @@ pub fn lsx_vseqi_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vseqi_h(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2404,7 +2404,7 @@ pub fn lsx_vseqi_w(a: m128i) -> m128i { unsafe 
{ transmute(__lsx_vseqi_w(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2413,7 +2413,7 @@ pub fn lsx_vseqi_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vseqi_d(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2422,35 +2422,35 @@ pub fn lsx_vslti_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vslti_b(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vslt_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vslt_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vslt_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vslt_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vslt_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vslt_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vslt_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vslt_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2459,7 +2459,7 @@ pub fn lsx_vslti_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vslti_h(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue 
= "117427")] @@ -2468,7 +2468,7 @@ pub fn lsx_vslti_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vslti_w(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2477,35 +2477,35 @@ pub fn lsx_vslti_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vslti_d(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vslt_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vslt_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vslt_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vslt_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vslt_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vslt_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vslt_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vslt_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2514,7 +2514,7 @@ pub fn lsx_vslti_bu(a: m128i) -> m128i { unsafe { transmute(__lsx_vslti_bu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2523,7 +2523,7 @@ pub fn lsx_vslti_hu(a: m128i) -> m128i { unsafe { transmute(__lsx_vslti_hu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = 
"lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2532,7 +2532,7 @@ pub fn lsx_vslti_wu(a: m128i) -> m128i { unsafe { transmute(__lsx_vslti_wu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2541,35 +2541,35 @@ pub fn lsx_vslti_du(a: m128i) -> m128i { unsafe { transmute(__lsx_vslti_du(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsle_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsle_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsle_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsle_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsle_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsle_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsle_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsle_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2578,7 +2578,7 @@ pub fn lsx_vslei_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vslei_b(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2587,7 +2587,7 @@ pub fn lsx_vslei_h(a: m128i) -> m128i { unsafe { 
transmute(__lsx_vslei_h(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2596,7 +2596,7 @@ pub fn lsx_vslei_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vslei_w(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2605,35 +2605,35 @@ pub fn lsx_vslei_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vslei_d(transmute(a), IMM_S5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsle_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsle_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsle_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsle_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsle_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsle_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsle_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsle_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2642,7 +2642,7 @@ pub fn lsx_vslei_bu(a: m128i) -> m128i { unsafe { transmute(__lsx_vslei_bu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", 
issue = "117427")] @@ -2651,7 +2651,7 @@ pub fn lsx_vslei_hu(a: m128i) -> m128i { unsafe { transmute(__lsx_vslei_hu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2660,7 +2660,7 @@ pub fn lsx_vslei_wu(a: m128i) -> m128i { unsafe { transmute(__lsx_vslei_wu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2669,7 +2669,7 @@ pub fn lsx_vslei_du(a: m128i) -> m128i { unsafe { transmute(__lsx_vslei_du(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2678,7 +2678,7 @@ pub fn lsx_vsat_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vsat_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2687,7 +2687,7 @@ pub fn lsx_vsat_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vsat_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2696,7 +2696,7 @@ pub fn lsx_vsat_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vsat_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2705,7 +2705,7 @@ pub fn lsx_vsat_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vsat_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2714,7 +2714,7 @@ pub fn 
lsx_vsat_bu(a: m128i) -> m128i { unsafe { transmute(__lsx_vsat_bu(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2723,7 +2723,7 @@ pub fn lsx_vsat_hu(a: m128i) -> m128i { unsafe { transmute(__lsx_vsat_hu(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2732,7 +2732,7 @@ pub fn lsx_vsat_wu(a: m128i) -> m128i { unsafe { transmute(__lsx_vsat_wu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2741,623 +2741,623 @@ pub fn lsx_vsat_du(a: m128i) -> m128i { unsafe { transmute(__lsx_vsat_du(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vadda_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vadda_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vadda_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vadda_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vadda_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vadda_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vadda_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vadda_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lsx_vsadd_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsadd_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsadd_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsadd_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsadd_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsadd_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsadd_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsadd_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsadd_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsadd_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsadd_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsadd_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsadd_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsadd_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsadd_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsadd_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavg_b(a: 
m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavg_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavg_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavg_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavg_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavg_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavg_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavg_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavg_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavg_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavg_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavg_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavg_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavg_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavg_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavg_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavgr_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavgr_b(transmute(a), 
transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavgr_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavgr_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavgr_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavgr_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavgr_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavgr_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavgr_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavgr_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavgr_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavgr_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavgr_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavgr_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavgr_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavgr_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssub_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssub_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] 
#[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssub_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssub_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssub_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssub_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssub_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssub_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssub_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssub_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssub_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssub_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssub_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssub_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssub_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssub_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vabsd_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vabsd_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lsx_vabsd_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vabsd_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vabsd_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vabsd_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vabsd_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vabsd_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vabsd_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vabsd_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vabsd_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vabsd_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vabsd_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vabsd_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vabsd_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vabsd_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmul_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmul_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmul_h(a: 
m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmul_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmul_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmul_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmul_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmul_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmadd_b(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmadd_b(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmadd_h(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmadd_h(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmadd_w(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmadd_w(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmadd_d(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmadd_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmsub_b(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmsub_b(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lsx_vmsub_h(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmsub_h(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmsub_w(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmsub_w(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmsub_d(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmsub_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vdiv_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vdiv_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vdiv_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vdiv_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vdiv_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vdiv_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vdiv_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vdiv_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vdiv_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vdiv_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lsx_vdiv_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vdiv_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vdiv_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vdiv_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vdiv_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vdiv_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhaddw_h_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhaddw_h_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhaddw_w_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhaddw_w_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhaddw_d_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhaddw_d_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhaddw_hu_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhaddw_hu_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhaddw_wu_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhaddw_wu_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] pub fn lsx_vhaddw_du_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhaddw_du_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhsubw_h_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhsubw_h_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhsubw_w_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhsubw_w_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhsubw_d_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhsubw_d_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhsubw_hu_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhsubw_hu_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhsubw_wu_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhsubw_wu_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhsubw_du_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhsubw_du_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmod_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmod_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub 
fn lsx_vmod_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmod_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmod_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmod_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmod_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmod_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmod_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmod_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmod_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmod_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmod_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmod_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmod_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmod_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vreplve_b(a: m128i, b: i32) -> m128i { unsafe { transmute(__lsx_vreplve_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vreplve_h(a: m128i, b: i32) -> m128i { unsafe { 
transmute(__lsx_vreplve_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vreplve_w(a: m128i, b: i32) -> m128i { unsafe { transmute(__lsx_vreplve_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vreplve_d(a: m128i, b: i32) -> m128i { unsafe { transmute(__lsx_vreplve_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3366,7 +3366,7 @@ pub fn lsx_vreplvei_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vreplvei_b(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3375,7 +3375,7 @@ pub fn lsx_vreplvei_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vreplvei_h(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3384,7 +3384,7 @@ pub fn lsx_vreplvei_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vreplvei_w(transmute(a), IMM2)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3393,203 +3393,203 @@ pub fn lsx_vreplvei_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vreplvei_d(transmute(a), IMM1)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpickev_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpickev_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpickev_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpickev_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpickev_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpickev_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpickev_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpickev_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpickod_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpickod_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpickod_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpickod_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpickod_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpickod_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpickod_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpickod_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vilvh_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vilvh_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", 
issue = "117427")] pub fn lsx_vilvh_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vilvh_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vilvh_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vilvh_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vilvh_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vilvh_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vilvl_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vilvl_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vilvl_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vilvl_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vilvl_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vilvl_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vilvl_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vilvl_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpackev_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpackev_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpackev_h(a: m128i, b: m128i) -> 
m128i { unsafe { transmute(__lsx_vpackev_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpackev_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpackev_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpackev_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpackev_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpackod_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpackod_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpackod_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpackod_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpackod_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpackod_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpackod_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpackod_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vshuf_h(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vshuf_h(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vshuf_w(a: m128i, b: m128i, c: m128i) -> m128i { 
unsafe { transmute(__lsx_vshuf_w(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vshuf_d(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vshuf_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vand_v(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vand_v(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3598,14 +3598,14 @@ pub fn lsx_vandi_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vandi_b(transmute(a), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vor_v(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vor_v(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3614,14 +3614,14 @@ pub fn lsx_vori_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vori_b(transmute(a), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vnor_v(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vnor_v(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3630,14 +3630,14 @@ pub fn lsx_vnori_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vnori_b(transmute(a), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lsx_vxor_v(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vxor_v(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3646,14 +3646,14 @@ pub fn lsx_vxori_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vxori_b(transmute(a), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitsel_v(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vbitsel_v(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3662,7 +3662,7 @@ pub fn lsx_vbitseli_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitseli_b(transmute(a), transmute(b), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3671,7 +3671,7 @@ pub fn lsx_vshuf4i_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vshuf4i_b(transmute(a), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3680,7 +3680,7 @@ pub fn lsx_vshuf4i_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vshuf4i_h(transmute(a), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3689,119 +3689,119 @@ pub fn lsx_vshuf4i_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vshuf4i_w(transmute(a), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lsx_vreplgr2vr_b(a: i32) -> m128i { unsafe { transmute(__lsx_vreplgr2vr_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vreplgr2vr_h(a: i32) -> m128i { unsafe { transmute(__lsx_vreplgr2vr_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vreplgr2vr_w(a: i32) -> m128i { unsafe { transmute(__lsx_vreplgr2vr_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vreplgr2vr_d(a: i64) -> m128i { unsafe { transmute(__lsx_vreplgr2vr_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpcnt_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vpcnt_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpcnt_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vpcnt_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpcnt_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vpcnt_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpcnt_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vpcnt_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vclo_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vclo_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vclo_h(a: 
m128i) -> m128i { unsafe { transmute(__lsx_vclo_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vclo_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vclo_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vclo_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vclo_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vclz_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vclz_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vclz_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vclz_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vclz_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vclz_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vclz_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vclz_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3810,7 +3810,7 @@ pub fn lsx_vpickve2gr_b(a: m128i) -> i32 { unsafe { transmute(__lsx_vpickve2gr_b(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3819,7 +3819,7 @@ pub fn lsx_vpickve2gr_h(a: m128i) -> i32 { unsafe { transmute(__lsx_vpickve2gr_h(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] 
#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3828,7 +3828,7 @@ pub fn lsx_vpickve2gr_w(a: m128i) -> i32 { unsafe { transmute(__lsx_vpickve2gr_w(transmute(a), IMM2)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3837,7 +3837,7 @@ pub fn lsx_vpickve2gr_d(a: m128i) -> i64 { unsafe { transmute(__lsx_vpickve2gr_d(transmute(a), IMM1)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3846,7 +3846,7 @@ pub fn lsx_vpickve2gr_bu(a: m128i) -> u32 { unsafe { transmute(__lsx_vpickve2gr_bu(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3855,7 +3855,7 @@ pub fn lsx_vpickve2gr_hu(a: m128i) -> u32 { unsafe { transmute(__lsx_vpickve2gr_hu(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3864,7 +3864,7 @@ pub fn lsx_vpickve2gr_wu(a: m128i) -> u32 { unsafe { transmute(__lsx_vpickve2gr_wu(transmute(a), IMM2)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3873,7 +3873,7 @@ pub fn lsx_vpickve2gr_du(a: m128i) -> u64 { unsafe { transmute(__lsx_vpickve2gr_du(transmute(a), IMM1)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3882,7 +3882,7 @@ pub fn lsx_vinsgr2vr_b(a: m128i, b: i32) -> m128i { unsafe { transmute(__lsx_vinsgr2vr_b(transmute(a), transmute(b), IMM4)) } } -#[inline] 
+#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3891,7 +3891,7 @@ pub fn lsx_vinsgr2vr_h(a: m128i, b: i32) -> m128i { unsafe { transmute(__lsx_vinsgr2vr_h(transmute(a), transmute(b), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3900,7 +3900,7 @@ pub fn lsx_vinsgr2vr_w(a: m128i, b: i32) -> m128i { unsafe { transmute(__lsx_vinsgr2vr_w(transmute(a), transmute(b), IMM2)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3909,448 +3909,448 @@ pub fn lsx_vinsgr2vr_d(a: m128i, b: i64) -> m128i { unsafe { transmute(__lsx_vinsgr2vr_d(transmute(a), transmute(b), IMM1)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfadd_s(a: m128, b: m128) -> m128 { unsafe { transmute(__lsx_vfadd_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfadd_d(a: m128d, b: m128d) -> m128d { unsafe { transmute(__lsx_vfadd_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfsub_s(a: m128, b: m128) -> m128 { unsafe { transmute(__lsx_vfsub_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfsub_d(a: m128d, b: m128d) -> m128d { unsafe { transmute(__lsx_vfsub_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] pub fn lsx_vfmul_s(a: m128, b: m128) -> m128 { unsafe { transmute(__lsx_vfmul_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmul_d(a: m128d, b: m128d) -> m128d { unsafe { transmute(__lsx_vfmul_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfdiv_s(a: m128, b: m128) -> m128 { unsafe { transmute(__lsx_vfdiv_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfdiv_d(a: m128d, b: m128d) -> m128d { unsafe { transmute(__lsx_vfdiv_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcvt_h_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcvt_h_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcvt_s_d(a: m128d, b: m128d) -> m128 { unsafe { transmute(__lsx_vfcvt_s_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmin_s(a: m128, b: m128) -> m128 { unsafe { transmute(__lsx_vfmin_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmin_d(a: m128d, b: m128d) -> m128d { unsafe { transmute(__lsx_vfmin_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmina_s(a: m128, b: m128) -> m128 { unsafe { 
transmute(__lsx_vfmina_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmina_d(a: m128d, b: m128d) -> m128d { unsafe { transmute(__lsx_vfmina_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmax_s(a: m128, b: m128) -> m128 { unsafe { transmute(__lsx_vfmax_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmax_d(a: m128d, b: m128d) -> m128d { unsafe { transmute(__lsx_vfmax_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmaxa_s(a: m128, b: m128) -> m128 { unsafe { transmute(__lsx_vfmaxa_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmaxa_d(a: m128d, b: m128d) -> m128d { unsafe { transmute(__lsx_vfmaxa_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfclass_s(a: m128) -> m128i { unsafe { transmute(__lsx_vfclass_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfclass_d(a: m128d) -> m128i { unsafe { transmute(__lsx_vfclass_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfsqrt_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfsqrt_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfsqrt_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vfsqrt_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrecip_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrecip_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrecip_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vfrecip_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrecipe_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrecipe_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrecipe_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vfrecipe_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrsqrte_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrsqrte_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrsqrte_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vfrsqrte_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrint_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrint_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrint_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vfrint_d(transmute(a))) } } -#[inline] +#[inline(always)] 
#[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrsqrt_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrsqrt_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrsqrt_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vfrsqrt_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vflogb_s(a: m128) -> m128 { unsafe { transmute(__lsx_vflogb_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vflogb_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vflogb_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcvth_s_h(a: m128i) -> m128 { unsafe { transmute(__lsx_vfcvth_s_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcvth_d_s(a: m128) -> m128d { unsafe { transmute(__lsx_vfcvth_d_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcvtl_s_h(a: m128i) -> m128 { unsafe { transmute(__lsx_vfcvtl_s_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcvtl_d_s(a: m128) -> m128d { unsafe { transmute(__lsx_vfcvtl_d_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftint_w_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftint_w_s(transmute(a))) } } -#[inline] +#[inline(always)] 
#[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftint_l_d(a: m128d) -> m128i { unsafe { transmute(__lsx_vftint_l_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftint_wu_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftint_wu_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftint_lu_d(a: m128d) -> m128i { unsafe { transmute(__lsx_vftint_lu_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrz_w_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrz_w_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrz_l_d(a: m128d) -> m128i { unsafe { transmute(__lsx_vftintrz_l_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrz_wu_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrz_wu_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrz_lu_d(a: m128d) -> m128i { unsafe { transmute(__lsx_vftintrz_lu_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vffint_s_w(a: m128i) -> m128 { unsafe { transmute(__lsx_vffint_s_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vffint_d_l(a: m128i) -> m128d { unsafe { transmute(__lsx_vffint_d_l(transmute(a))) } } 
-#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vffint_s_wu(a: m128i) -> m128 { unsafe { transmute(__lsx_vffint_s_wu(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vffint_d_lu(a: m128i) -> m128d { unsafe { transmute(__lsx_vffint_d_lu(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vandn_v(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vandn_v(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vneg_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vneg_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vneg_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vneg_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vneg_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vneg_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vneg_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vneg_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmuh_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmuh_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmuh_h(a: m128i, b: m128i) -> m128i { unsafe { 
transmute(__lsx_vmuh_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmuh_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmuh_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmuh_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmuh_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmuh_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmuh_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmuh_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmuh_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmuh_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmuh_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmuh_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmuh_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4359,7 +4359,7 @@ pub fn lsx_vsllwil_h_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vsllwil_h_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4368,7 +4368,7 @@ pub fn lsx_vsllwil_w_h(a: m128i) 
-> m128i { unsafe { transmute(__lsx_vsllwil_w_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4377,7 +4377,7 @@ pub fn lsx_vsllwil_d_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vsllwil_d_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4386,7 +4386,7 @@ pub fn lsx_vsllwil_hu_bu(a: m128i) -> m128i { unsafe { transmute(__lsx_vsllwil_hu_bu(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4395,7 +4395,7 @@ pub fn lsx_vsllwil_wu_hu(a: m128i) -> m128i { unsafe { transmute(__lsx_vsllwil_wu_hu(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4404,217 +4404,217 @@ pub fn lsx_vsllwil_du_wu(a: m128i) -> m128i { unsafe { transmute(__lsx_vsllwil_du_wu(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsran_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsran_b_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsran_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsran_h_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsran_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsran_w_d(transmute(a), transmute(b))) } } -#[inline] 
+#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssran_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssran_b_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssran_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssran_h_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssran_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssran_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssran_bu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssran_bu_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssran_hu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssran_hu_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssran_wu_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssran_wu_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrarn_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrarn_b_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrarn_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrarn_h_w(transmute(a), transmute(b))) } } -#[inline] 
+#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrarn_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrarn_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrarn_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarn_b_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrarn_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarn_h_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrarn_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarn_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrarn_bu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarn_bu_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrarn_hu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarn_hu_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrarn_wu_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarn_wu_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrln_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrln_b_h(transmute(a), transmute(b))) } } -#[inline] 
+#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrln_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrln_h_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrln_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrln_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrln_bu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrln_bu_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrln_hu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrln_hu_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrln_wu_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrln_wu_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrlrn_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlrn_b_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrlrn_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlrn_h_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrlrn_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlrn_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] 
#[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrlrn_bu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrn_bu_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrlrn_hu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrn_hu_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrlrn_wu_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrn_wu_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4623,7 +4623,7 @@ pub fn lsx_vfrstpi_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vfrstpi_b(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4632,21 +4632,21 @@ pub fn lsx_vfrstpi_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vfrstpi_h(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrstp_b(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vfrstp_b(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrstp_h(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vfrstp_h(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] @@ -4655,7 +4655,7 @@ pub fn lsx_vshuf4i_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vshuf4i_d(transmute(a), transmute(b), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4664,7 +4664,7 @@ pub fn lsx_vbsrl_v(a: m128i) -> m128i { unsafe { transmute(__lsx_vbsrl_v(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4673,7 +4673,7 @@ pub fn lsx_vbsll_v(a: m128i) -> m128i { unsafe { transmute(__lsx_vbsll_v(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4682,7 +4682,7 @@ pub fn lsx_vextrins_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vextrins_b(transmute(a), transmute(b), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4691,7 +4691,7 @@ pub fn lsx_vextrins_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vextrins_h(transmute(a), transmute(b), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4700,7 +4700,7 @@ pub fn lsx_vextrins_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vextrins_w(transmute(a), transmute(b), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4709,343 +4709,343 @@ pub fn lsx_vextrins_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vextrins_d(transmute(a), transmute(b), IMM8)) } } -#[inline] 
+#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmskltz_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vmskltz_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmskltz_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vmskltz_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmskltz_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vmskltz_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmskltz_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vmskltz_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsigncov_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsigncov_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsigncov_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsigncov_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsigncov_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsigncov_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsigncov_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsigncov_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lsx_vfmadd_s(a: m128, b: m128, c: m128) -> m128 { unsafe { transmute(__lsx_vfmadd_s(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmadd_d(a: m128d, b: m128d, c: m128d) -> m128d { unsafe { transmute(__lsx_vfmadd_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmsub_s(a: m128, b: m128, c: m128) -> m128 { unsafe { transmute(__lsx_vfmsub_s(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmsub_d(a: m128d, b: m128d, c: m128d) -> m128d { unsafe { transmute(__lsx_vfmsub_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfnmadd_s(a: m128, b: m128, c: m128) -> m128 { unsafe { transmute(__lsx_vfnmadd_s(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfnmadd_d(a: m128d, b: m128d, c: m128d) -> m128d { unsafe { transmute(__lsx_vfnmadd_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfnmsub_s(a: m128, b: m128, c: m128) -> m128 { unsafe { transmute(__lsx_vfnmsub_s(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfnmsub_d(a: m128d, b: m128d, c: m128d) -> m128d { unsafe { transmute(__lsx_vfnmsub_d(transmute(a), transmute(b), transmute(c))) } } 
-#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrne_w_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrne_w_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrne_l_d(a: m128d) -> m128i { unsafe { transmute(__lsx_vftintrne_l_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrp_w_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrp_w_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrp_l_d(a: m128d) -> m128i { unsafe { transmute(__lsx_vftintrp_l_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrm_w_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrm_w_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrm_l_d(a: m128d) -> m128i { unsafe { transmute(__lsx_vftintrm_l_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftint_w_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vftint_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vffint_s_l(a: m128i, b: m128i) -> m128 { unsafe { transmute(__lsx_vffint_s_l(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lsx_vftintrz_w_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vftintrz_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrp_w_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vftintrp_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrm_w_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vftintrm_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrne_w_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vftintrne_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintl_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintl_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftinth_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftinth_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vffinth_d_w(a: m128i) -> m128d { unsafe { transmute(__lsx_vffinth_d_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vffintl_d_w(a: m128i) -> m128d { unsafe { transmute(__lsx_vffintl_d_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrzl_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrzl_l_s(transmute(a))) } } 
-#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrzh_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrzh_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrpl_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrpl_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrph_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrph_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrml_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrml_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrmh_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrmh_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrnel_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrnel_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrneh_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrneh_l_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrintrne_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrintrne_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrintrne_d(a: m128d) -> m128d { unsafe 
{ transmute(__lsx_vfrintrne_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrintrz_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrintrz_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrintrz_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vfrintrz_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrintrp_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrintrp_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrintrp_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vfrintrp_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrintrm_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrintrm_s(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrintrm_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vfrintrm_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2, 3)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5055,7 +5055,7 @@ pub unsafe fn lsx_vstelm_b(a: m128i, mem_add transmute(__lsx_vstelm_b(transmute(a), mem_addr, IMM_S8, IMM4)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2, 3)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5065,7 +5065,7 @@ pub unsafe fn lsx_vstelm_h(a: m128i, mem_add transmute(__lsx_vstelm_h(transmute(a), mem_addr, IMM_S8, IMM3)) } -#[inline] 
+#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2, 3)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5075,7 +5075,7 @@ pub unsafe fn lsx_vstelm_w(a: m128i, mem_add transmute(__lsx_vstelm_w(transmute(a), mem_addr, IMM_S8, IMM2)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2, 3)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5085,693 +5085,693 @@ pub unsafe fn lsx_vstelm_d(a: m128i, mem_add transmute(__lsx_vstelm_d(transmute(a), mem_addr, IMM_S8, IMM1)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_d_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_d_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_w_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_w_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_h_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_h_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_d_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_d_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_w_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_w_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_h_b(a: m128i, b: m128i) -> 
m128i { unsafe { transmute(__lsx_vaddwod_h_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_d_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_d_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_w_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_w_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_h_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_h_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_d_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_d_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_w_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_w_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_h_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_h_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_d_wu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_d_wu_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_w_hu_h(a: m128i, 
b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_w_hu_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_h_bu_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_h_bu_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_d_wu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_d_wu_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_w_hu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_w_hu_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_h_bu_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_h_bu_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwev_d_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwev_d_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwev_w_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwev_w_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwev_h_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwev_h_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lsx_vsubwod_d_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwod_d_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwod_w_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwod_w_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwod_h_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwod_h_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwev_d_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwev_d_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwev_w_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwev_w_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwev_h_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwev_h_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwod_d_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwod_d_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwod_w_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwod_w_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub 
fn lsx_vsubwod_h_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwod_h_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_q_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_q_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_q_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_q_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_q_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_q_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_q_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_q_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwev_q_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwev_q_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwod_q_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwod_q_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwev_q_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwev_q_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub 
fn lsx_vsubwod_q_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwod_q_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_q_du_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_q_du_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_q_du_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_q_du_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_d_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_d_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_w_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_w_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_h_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_h_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_d_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_d_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_w_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_w_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] pub fn lsx_vmulwod_h_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_h_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_d_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_d_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_w_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_w_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_h_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_h_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_d_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_d_wu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_w_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_w_hu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_h_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_h_bu(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_d_wu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_d_wu_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_w_hu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_w_hu_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_h_bu_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_h_bu_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_d_wu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_d_wu_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_w_hu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_w_hu_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_h_bu_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_h_bu_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_q_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_q_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_q_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_q_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_q_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_q_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = 
"lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_q_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_q_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_q_du_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_q_du_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_q_du_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_q_du_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhaddw_q_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhaddw_q_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhaddw_qu_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhaddw_qu_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhsubw_q_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhsubw_q_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhsubw_qu_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhsubw_qu_du(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_d_w(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_d_w(transmute(a), transmute(b), transmute(c))) } } -#[inline] 
+#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_w_h(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_w_h(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_h_b(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_h_b(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_d_wu(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_d_wu(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_w_hu(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_w_hu(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_h_bu(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_h_bu(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_d_w(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_d_w(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_w_h(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_w_h(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_h_b(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_h_b(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_d_wu(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_d_wu(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_w_hu(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_w_hu(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_h_bu(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_h_bu(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_d_wu_w(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_d_wu_w(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_w_hu_h(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_w_hu_h(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_h_bu_b(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_h_bu_b(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] pub fn lsx_vmaddwod_d_wu_w(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_d_wu_w(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_w_hu_h(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_w_hu_h(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_h_bu_b(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_h_bu_b(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_q_d(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_q_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_q_d(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_q_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_q_du(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_q_du(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_q_du(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_q_du(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_q_du_d(a: m128i, b: 
m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_q_du_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_q_du_d(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_q_du_d(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vrotr_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vrotr_b(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vrotr_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vrotr_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vrotr_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vrotr_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vrotr_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vrotr_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vadd_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vadd_q(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsub_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsub_q(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] 
@@ -5780,7 +5780,7 @@ pub unsafe fn lsx_vldrepl_b(mem_addr: *const i8) -> m128i { transmute(__lsx_vldrepl_b(mem_addr, IMM_S12)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5789,7 +5789,7 @@ pub unsafe fn lsx_vldrepl_h(mem_addr: *const i8) -> m128i { transmute(__lsx_vldrepl_h(mem_addr, IMM_S11)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5798,7 +5798,7 @@ pub unsafe fn lsx_vldrepl_w(mem_addr: *const i8) -> m128i { transmute(__lsx_vldrepl_w(mem_addr, IMM_S10)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5807,77 +5807,77 @@ pub unsafe fn lsx_vldrepl_d(mem_addr: *const i8) -> m128i { transmute(__lsx_vldrepl_d(mem_addr, IMM_S9)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmskgez_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vmskgez_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmsknz_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vmsknz_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vexth_h_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vexth_h_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vexth_w_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vexth_w_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] pub fn lsx_vexth_d_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vexth_d_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vexth_q_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vexth_q_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vexth_hu_bu(a: m128i) -> m128i { unsafe { transmute(__lsx_vexth_hu_bu(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vexth_wu_hu(a: m128i) -> m128i { unsafe { transmute(__lsx_vexth_wu_hu(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vexth_du_wu(a: m128i) -> m128i { unsafe { transmute(__lsx_vexth_du_wu(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vexth_qu_du(a: m128i) -> m128i { unsafe { transmute(__lsx_vexth_qu_du(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5886,7 +5886,7 @@ pub fn lsx_vrotri_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vrotri_b(transmute(a), IMM3)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5895,7 +5895,7 @@ pub fn lsx_vrotri_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vrotri_h(transmute(a), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5904,7 +5904,7 @@ pub fn lsx_vrotri_w(a: 
m128i) -> m128i { unsafe { transmute(__lsx_vrotri_w(transmute(a), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5913,14 +5913,14 @@ pub fn lsx_vrotri_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vrotri_d(transmute(a), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vextl_q_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vextl_q_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5929,7 +5929,7 @@ pub fn lsx_vsrlni_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5938,7 +5938,7 @@ pub fn lsx_vsrlni_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5947,7 +5947,7 @@ pub fn lsx_vsrlni_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5956,7 +5956,7 @@ pub fn lsx_vsrlni_d_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5965,7 +5965,7 @@ 
pub fn lsx_vsrlrni_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlrni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5974,7 +5974,7 @@ pub fn lsx_vsrlrni_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlrni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5983,7 +5983,7 @@ pub fn lsx_vsrlrni_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlrni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5992,7 +5992,7 @@ pub fn lsx_vsrlrni_d_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlrni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6001,7 +6001,7 @@ pub fn lsx_vssrlni_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6010,7 +6010,7 @@ pub fn lsx_vssrlni_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6019,7 +6019,7 @@ pub fn lsx_vssrlni_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlni_w_d(transmute(a), transmute(b), IMM6)) } } 
-#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6028,7 +6028,7 @@ pub fn lsx_vssrlni_d_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6037,7 +6037,7 @@ pub fn lsx_vssrlni_bu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlni_bu_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6046,7 +6046,7 @@ pub fn lsx_vssrlni_hu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlni_hu_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6055,7 +6055,7 @@ pub fn lsx_vssrlni_wu_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlni_wu_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6064,7 +6064,7 @@ pub fn lsx_vssrlni_du_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlni_du_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6073,7 +6073,7 @@ pub fn lsx_vssrlrni_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] @@ -6082,7 +6082,7 @@ pub fn lsx_vssrlrni_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6091,7 +6091,7 @@ pub fn lsx_vssrlrni_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6100,7 +6100,7 @@ pub fn lsx_vssrlrni_d_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6109,7 +6109,7 @@ pub fn lsx_vssrlrni_bu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrni_bu_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6118,7 +6118,7 @@ pub fn lsx_vssrlrni_hu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrni_hu_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6127,7 +6127,7 @@ pub fn lsx_vssrlrni_wu_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrni_wu_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6136,7 +6136,7 @@ pub fn lsx_vssrlrni_du_q(a: m128i, b: m128i) -> m128i 
{ unsafe { transmute(__lsx_vssrlrni_du_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6145,7 +6145,7 @@ pub fn lsx_vsrani_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrani_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6154,7 +6154,7 @@ pub fn lsx_vsrani_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrani_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6163,7 +6163,7 @@ pub fn lsx_vsrani_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrani_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6172,7 +6172,7 @@ pub fn lsx_vsrani_d_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrani_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6181,7 +6181,7 @@ pub fn lsx_vsrarni_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrarni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6190,7 +6190,7 @@ pub fn lsx_vsrarni_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrarni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = 
"lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6199,7 +6199,7 @@ pub fn lsx_vsrarni_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrarni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6208,7 +6208,7 @@ pub fn lsx_vsrarni_d_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrarni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6217,7 +6217,7 @@ pub fn lsx_vssrani_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrani_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6226,7 +6226,7 @@ pub fn lsx_vssrani_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrani_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6235,7 +6235,7 @@ pub fn lsx_vssrani_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrani_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6244,7 +6244,7 @@ pub fn lsx_vssrani_d_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrani_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6253,7 +6253,7 @@ pub fn 
lsx_vssrani_bu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrani_bu_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6262,7 +6262,7 @@ pub fn lsx_vssrani_hu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrani_hu_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6271,7 +6271,7 @@ pub fn lsx_vssrani_wu_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrani_wu_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6280,7 +6280,7 @@ pub fn lsx_vssrani_du_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrani_du_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6289,7 +6289,7 @@ pub fn lsx_vssrarni_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6298,7 +6298,7 @@ pub fn lsx_vssrarni_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6307,7 +6307,7 @@ pub fn lsx_vssrarni_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarni_w_d(transmute(a), transmute(b), IMM6)) 
} } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6316,7 +6316,7 @@ pub fn lsx_vssrarni_d_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6325,7 +6325,7 @@ pub fn lsx_vssrarni_bu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarni_bu_h(transmute(a), transmute(b), IMM4)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6334,7 +6334,7 @@ pub fn lsx_vssrarni_hu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarni_hu_w(transmute(a), transmute(b), IMM5)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6343,7 +6343,7 @@ pub fn lsx_vssrarni_wu_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarni_wu_d(transmute(a), transmute(b), IMM6)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6352,7 +6352,7 @@ pub fn lsx_vssrarni_du_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarni_du_q(transmute(a), transmute(b), IMM7)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6361,7 +6361,7 @@ pub fn lsx_vpermi_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpermi_w(transmute(a), transmute(b), IMM8)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature 
= "stdarch_loongarch", issue = "117427")] @@ -6370,7 +6370,7 @@ pub unsafe fn lsx_vld(mem_addr: *const i8) -> m128i { transmute(__lsx_vld(mem_addr, IMM_S12)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6379,56 +6379,56 @@ pub unsafe fn lsx_vst(a: m128i, mem_addr: *mut i8) { transmute(__lsx_vst(transmute(a), mem_addr, IMM_S12)) } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrlrn_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrn_b_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrlrn_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrn_h_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrlrn_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrn_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrln_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrln_b_h(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrln_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrln_h_w(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrln_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrln_w_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = 
"lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vorn_v(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vorn_v(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6437,413 +6437,413 @@ pub fn lsx_vldi() -> m128i { unsafe { transmute(__lsx_vldi(IMM_S13)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vshuf_b(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vshuf_b(transmute(a), transmute(b), transmute(c))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn lsx_vldx(mem_addr: *const i8, b: i64) -> m128i { transmute(__lsx_vldx(mem_addr, transmute(b))) } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn lsx_vstx(a: m128i, mem_addr: *mut i8, b: i64) { transmute(__lsx_vstx(transmute(a), mem_addr, transmute(b))) } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vextl_qu_du(a: m128i) -> m128i { unsafe { transmute(__lsx_vextl_qu_du(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bnz_b(a: m128i) -> i32 { unsafe { transmute(__lsx_bnz_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bnz_d(a: m128i) -> i32 { unsafe { transmute(__lsx_bnz_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bnz_h(a: m128i) -> i32 { 
unsafe { transmute(__lsx_bnz_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bnz_v(a: m128i) -> i32 { unsafe { transmute(__lsx_bnz_v(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bnz_w(a: m128i) -> i32 { unsafe { transmute(__lsx_bnz_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bz_b(a: m128i) -> i32 { unsafe { transmute(__lsx_bz_b(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bz_d(a: m128i) -> i32 { unsafe { transmute(__lsx_bz_d(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bz_h(a: m128i) -> i32 { unsafe { transmute(__lsx_bz_h(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bz_v(a: m128i) -> i32 { unsafe { transmute(__lsx_bz_v(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bz_w(a: m128i) -> i32 { unsafe { transmute(__lsx_bz_w(transmute(a))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_caf_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_caf_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_caf_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_caf_s(transmute(a), 
transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_ceq_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_ceq_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_ceq_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_ceq_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cle_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_cle_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cle_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_cle_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_clt_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_clt_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_clt_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_clt_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cne_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_cne_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cne_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_cne_s(transmute(a), transmute(b))) 
} } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cor_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_cor_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cor_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_cor_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cueq_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_cueq_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cueq_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_cueq_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cule_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_cule_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cule_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_cule_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cult_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_cult_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cult_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_cult_s(transmute(a), transmute(b))) } } 
-#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cun_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_cun_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cune_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_cune_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cune_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_cune_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cun_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_cun_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_saf_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_saf_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_saf_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_saf_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_seq_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_seq_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_seq_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_seq_s(transmute(a), transmute(b))) } } -#[inline] 
+#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sle_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_sle_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sle_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_sle_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_slt_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_slt_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_slt_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_slt_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sne_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_sne_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sne_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_sne_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sor_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_sor_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sor_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_sor_s(transmute(a), transmute(b))) } } -#[inline] 
+#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sueq_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_sueq_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sueq_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_sueq_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sule_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_sule_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sule_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_sule_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sult_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_sult_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sult_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_sult_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sun_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_sun_d(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sune_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_sune_d(transmute(a), transmute(b))) } } -#[inline] 
+#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sune_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_sune_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sun_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_sun_s(transmute(a), transmute(b))) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6852,7 +6852,7 @@ pub fn lsx_vrepli_b() -> m128i { unsafe { transmute(__lsx_vrepli_b(IMM_S10)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6861,7 +6861,7 @@ pub fn lsx_vrepli_d() -> m128i { unsafe { transmute(__lsx_vrepli_d(IMM_S10)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6870,7 +6870,7 @@ pub fn lsx_vrepli_h() -> m128i { unsafe { transmute(__lsx_vrepli_h(IMM_S10)) } } -#[inline] +#[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] diff --git a/library/stdarch/crates/core_arch/src/loongarch64/mod.rs b/library/stdarch/crates/core_arch/src/loongarch64/mod.rs index ab968aff20bbe..41c21aac2a574 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/mod.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/mod.rs @@ -11,7 +11,7 @@ pub use self::lsx::*; use crate::arch::asm; /// Reads the 64-bit stable counter value and the counter ID -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn rdtime_d() -> (i64, isize) { let (val, tid): (i64, 
isize); @@ -48,21 +48,21 @@ unsafe extern "unadjusted" { } /// Calculate the CRC value using the IEEE 802.3 polynomial (0xEDB88320) -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn crc_w_d_w(a: i64, b: i32) -> i32 { unsafe { __crc_w_d_w(a, b) } } /// Calculate the CRC value using the Castagnoli polynomial (0x82F63B78) -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn crcc_w_d_w(a: i64, b: i32) -> i32 { unsafe { __crcc_w_d_w(a, b) } } /// Generates the cache operation instruction -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn cacop(b: i64) { static_assert_uimm_bits!(IMM5, 5); @@ -71,7 +71,7 @@ pub unsafe fn cacop(b: i64) { } /// Reads the CSR -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn csrrd() -> i64 { static_assert_uimm_bits!(IMM14, 14); @@ -79,7 +79,7 @@ pub unsafe fn csrrd() -> i64 { } /// Writes the CSR -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn csrwr(a: i64) -> i64 { static_assert_uimm_bits!(IMM14, 14); @@ -87,7 +87,7 @@ pub unsafe fn csrwr(a: i64) -> i64 { } /// Exchanges the CSR -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn csrxchg(a: i64, b: i64) -> i64 { static_assert_uimm_bits!(IMM14, 14); @@ -95,35 +95,35 @@ pub unsafe fn csrxchg(a: i64, b: i64) -> i64 { } /// Reads the 64-bit IO-CSR -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn iocsrrd_d(a: i32) -> i64 { __iocsrrd_d(a) } /// Writes the 64-bit IO-CSR -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn iocsrwr_d(a: i64, b: i32) { __iocsrwr_d(a, b) } /// Generates the less-than-or-equal asseration instruction -#[inline] +#[inline(always)] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub unsafe fn asrtle(a: i64, b: i64) { __asrtle(a, b); } /// Generates the greater-than asseration instruction -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn asrtgt(a: i64, b: i64) { __asrtgt(a, b); } /// Loads the page table directory entry -#[inline] +#[inline(always)] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn lddir(a: i64) -> i64 { @@ -132,7 +132,7 @@ pub unsafe fn lddir(a: i64) -> i64 { } /// Loads the page table entry -#[inline] +#[inline(always)] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn ldpte(a: i64) { diff --git a/library/stdarch/crates/core_arch/src/loongarch_shared/mod.rs b/library/stdarch/crates/core_arch/src/loongarch_shared/mod.rs index 8991fe857682b..b2a67fb609974 100644 --- a/library/stdarch/crates/core_arch/src/loongarch_shared/mod.rs +++ b/library/stdarch/crates/core_arch/src/loongarch_shared/mod.rs @@ -3,7 +3,7 @@ use crate::arch::asm; /// Reads the lower 32-bit stable counter value and the counter ID -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn rdtimel_w() -> (i32, isize) { let (val, tid): (i32, isize); @@ -12,7 +12,7 @@ pub fn rdtimel_w() -> (i32, isize) { } /// Reads the upper 32-bit stable counter value and the counter ID -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn rdtimeh_w() -> (i32, isize) { let (val, tid): (i32, isize); @@ -71,49 +71,49 @@ unsafe extern "unadjusted" { } /// Calculate the CRC value using the IEEE 802.3 polynomial (0xEDB88320) -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn crc_w_b_w(a: i32, b: i32) -> i32 { unsafe { __crc_w_b_w(a, b) } } /// Calculate the CRC value using the IEEE 802.3 polynomial (0xEDB88320) -#[inline] +#[inline(always)] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn crc_w_h_w(a: i32, b: i32) -> i32 { unsafe { __crc_w_h_w(a, b) } } /// Calculate the CRC value using the IEEE 802.3 polynomial (0xEDB88320) -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn crc_w_w_w(a: i32, b: i32) -> i32 { unsafe { __crc_w_w_w(a, b) } } /// Calculate the CRC value using the Castagnoli polynomial (0x82F63B78) -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn crcc_w_b_w(a: i32, b: i32) -> i32 { unsafe { __crcc_w_b_w(a, b) } } /// Calculate the CRC value using the Castagnoli polynomial (0x82F63B78) -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn crcc_w_h_w(a: i32, b: i32) -> i32 { unsafe { __crcc_w_h_w(a, b) } } /// Calculate the CRC value using the Castagnoli polynomial (0x82F63B78) -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn crcc_w_w_w(a: i32, b: i32) -> i32 { unsafe { __crcc_w_w_w(a, b) } } /// Generates the memory barrier instruction -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn dbar() { static_assert_uimm_bits!(IMM15, 15); @@ -121,7 +121,7 @@ pub fn dbar() { } /// Generates the instruction-fetch barrier instruction -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn ibar() { static_assert_uimm_bits!(IMM15, 15); @@ -129,7 +129,7 @@ pub fn ibar() { } /// Moves data from a GPR to the FCSR -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn movgr2fcsr(a: i32) { static_assert_uimm_bits!(IMM2, 2); @@ -137,7 +137,7 @@ pub unsafe fn movgr2fcsr(a: i32) { } /// Moves data from a FCSR to the GPR -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn movfcsr2gr() -> i32 { static_assert_uimm_bits!(IMM2, 
2); @@ -145,49 +145,49 @@ pub fn movfcsr2gr() -> i32 { } /// Reads the 8-bit IO-CSR -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn iocsrrd_b(a: i32) -> i32 { __iocsrrd_b(a) } /// Reads the 16-bit IO-CSR -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn iocsrrd_h(a: i32) -> i32 { __iocsrrd_h(a) } /// Reads the 32-bit IO-CSR -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn iocsrrd_w(a: i32) -> i32 { __iocsrrd_w(a) } /// Writes the 8-bit IO-CSR -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn iocsrwr_b(a: i32, b: i32) { __iocsrwr_b(a, b) } /// Writes the 16-bit IO-CSR -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn iocsrwr_h(a: i32, b: i32) { __iocsrwr_h(a, b) } /// Writes the 32-bit IO-CSR -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn iocsrwr_w(a: i32, b: i32) { __iocsrwr_w(a, b) } /// Generates the breakpoint instruction -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn brk() { static_assert_uimm_bits!(IMM15, 15); @@ -195,14 +195,14 @@ pub unsafe fn brk() { } /// Reads the CPU configuration register -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn cpucfg(a: i32) -> i32 { unsafe { __cpucfg(a) } } /// Generates the syscall instruction -#[inline] +#[inline(always)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn syscall() { static_assert_uimm_bits!(IMM15, 15); @@ -210,7 +210,7 @@ pub unsafe fn syscall() { } /// Calculate the approximate single-precision result of 1.0 divided -#[inline] +#[inline(always)] #[target_feature(enable = "frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub 
fn frecipe_s(a: f32) -> f32 { @@ -218,7 +218,7 @@ pub fn frecipe_s(a: f32) -> f32 { } /// Calculate the approximate double-precision result of 1.0 divided -#[inline] +#[inline(always)] #[target_feature(enable = "frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn frecipe_d(a: f64) -> f64 { @@ -226,7 +226,7 @@ pub fn frecipe_d(a: f64) -> f64 { } /// Calculate the approximate single-precision result of dividing 1.0 by the square root -#[inline] +#[inline(always)] #[target_feature(enable = "frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn frsqrte_s(a: f32) -> f32 { @@ -234,7 +234,7 @@ pub fn frsqrte_s(a: f32) -> f32 { } /// Calculate the approximate double-precision result of dividing 1.0 by the square root -#[inline] +#[inline(always)] #[target_feature(enable = "frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn frsqrte_d(a: f64) -> f64 { diff --git a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs index 10b87c70e9ede..fdc9b2b6bcabe 100644 --- a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs +++ b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs @@ -597,7 +597,7 @@ fn gen_bind_body( let function = if !rustc_legacy_const_generics.is_empty() { format!( r#" -#[inline]{target_feature} +#[inline(always)]{target_feature} #[{rustc_legacy_const_generics}] #[unstable(feature = "stdarch_loongarch", issue = "117427")] {fn_decl}{{ @@ -609,7 +609,7 @@ fn gen_bind_body( } else { format!( r#" -#[inline]{target_feature} +#[inline(always)]{target_feature} #[unstable(feature = "stdarch_loongarch", issue = "117427")] {fn_decl}{{ {call_params} From dc3ba83196d2afb60010dead36ea6150bd32dbda Mon Sep 17 00:00:00 2001 From: Jynn Nelson Date: Thu, 19 Mar 2026 13:43:40 +0100 Subject: [PATCH 10/64] Add more docs on how to run the generator - Note that working directory matters - Note that stdarch-gen-arm uses nightly - Fix 
missing directory. Without this, it would print to stdout in one giant merged file. --- library/stdarch/crates/stdarch-gen-arm/README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/library/stdarch/crates/stdarch-gen-arm/README.md b/library/stdarch/crates/stdarch-gen-arm/README.md index 4da14bcbb6c9b..64f1183f1d6d4 100644 --- a/library/stdarch/crates/stdarch-gen-arm/README.md +++ b/library/stdarch/crates/stdarch-gen-arm/README.md @@ -1,11 +1,11 @@ # stdarch-gen-arm generator guide ## Running the generator -- Run: `cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec` -``` -$ cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec - Finished `dev` profile [unoptimized + debuginfo] target(s) in 0.18s - Running `target/debug/stdarch-gen-arm crates/stdarch-gen-arm/spec` -``` + +Run: `cargo +nightly run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec crates/core_arch/src` + +NOTE: If you are running this from rust-lang/rust, you must be in the `library/stdarch` +working directory. 
+ ## Input/Output ### Input files (intrinsic YAML definitions) - `crates/stdarch-gen-arm/spec//*.spec.yml` From 2c76cb3479129ab9653c9b6be9ea83925352b1f8 Mon Sep 17 00:00:00 2001 From: ArunTamil21 Date: Sun, 22 Mar 2026 19:50:07 +0000 Subject: [PATCH 11/64] Use const fn and remove unsafe from alias test wrappers --- .../stdarch/crates/core_arch/src/x86/sse.rs | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/x86/sse.rs b/library/stdarch/crates/core_arch/src/x86/sse.rs index 11fb3a865b30d..4d052186bcae8 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse.rs @@ -3011,12 +3011,12 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_cvtss_si32() { + fn test_mm_cvtss_si32() { test_mm_cvtss_si32_impl!(_mm_cvtss_si32); } #[simd_test(enable = "sse")] - unsafe fn test_mm_cvt_ss2si() { + fn test_mm_cvt_ss2si() { test_mm_cvtss_si32_impl!(_mm_cvt_ss2si); } @@ -3043,12 +3043,12 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_cvttss_si32() { + fn test_mm_cvttss_si32() { test_cvttss_si32_impl!(_mm_cvttss_si32); } #[simd_test(enable = "sse")] - unsafe fn test_mm_cvtt_ss2si() { + fn test_mm_cvtt_ss2si() { test_cvttss_si32_impl!(_mm_cvtt_ss2si); } @@ -3071,12 +3071,12 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_cvtsi32_ss() { + fn test_mm_cvtsi32_ss() { test_mm_cvtsi32_ss_impl!(_mm_cvtsi32_ss); } #[simd_test(enable = "sse")] - unsafe fn test_mm_cvt_si2ss() { + fn test_mm_cvt_si2ss() { test_mm_cvtsi32_ss_impl!(_mm_cvt_si2ss); } @@ -3103,12 +3103,12 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_set1_ps() { + const fn test_mm_set1_ps() { test_mm_set1_ps_impl!(_mm_set1_ps); } #[simd_test(enable = "sse")] - unsafe fn test_mm_set_ps1() { + const fn test_mm_set_ps1() { test_mm_set1_ps_impl!(_mm_set_ps1); } @@ -3201,18 +3201,18 @@ mod tests { macro_rules! 
test_mm_load1_ps_impl { ($alias:ident) => { let a = 42.0f32; - let r = $alias(ptr::addr_of!(a)); + let r = unsafe { $alias(ptr::addr_of!(a)) }; assert_eq_m128(r, _mm_setr_ps(42.0, 42.0, 42.0, 42.0)); }; } #[simd_test(enable = "sse")] - unsafe fn test_mm_load1_ps() { + fn test_mm_load1_ps() { test_mm_load1_ps_impl!(_mm_load1_ps); } #[simd_test(enable = "sse")] - unsafe fn test_mm_load_ps1() { + fn test_mm_load_ps1() { test_mm_load1_ps_impl!(_mm_load_ps1); } @@ -3270,18 +3270,18 @@ mod tests { let mut vals = Memory { data: [0.0f32; 4] }; let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); let p = vals.data.as_mut_ptr(); - $alias(p, *black_box(&a)); + unsafe { $alias(p, *black_box(&a)) }; assert_eq!(vals.data, [1.0, 1.0, 1.0, 1.0]); }; } #[simd_test(enable = "sse")] - unsafe fn test_mm_store1_ps() { + fn test_mm_store1_ps() { test_mm_store1_ps_impl!(_mm_store1_ps); } #[simd_test(enable = "sse")] - unsafe fn test_mm_store_ps1() { + fn test_mm_store_ps1() { test_mm_store1_ps_impl!(_mm_store_ps1); } From 30573344b0ab160c2aef0e0e2cf51b92d7ab69ef Mon Sep 17 00:00:00 2001 From: ArunTamil21 Date: Tue, 24 Mar 2026 12:23:05 +0000 Subject: [PATCH 12/64] Mark alias test wrappers as const fn where supported and clean up skip list --- library/stdarch/crates/core_arch/src/x86/sse.rs | 10 +++++----- .../stdarch/crates/stdarch-verify/tests/x86-intel.rs | 1 - 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/x86/sse.rs b/library/stdarch/crates/core_arch/src/x86/sse.rs index 4d052186bcae8..c6531e839a7d8 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse.rs @@ -3071,7 +3071,7 @@ mod tests { } #[simd_test(enable = "sse")] - fn test_mm_cvtsi32_ss() { + const fn test_mm_cvtsi32_ss() { test_mm_cvtsi32_ss_impl!(_mm_cvtsi32_ss); } @@ -3207,12 +3207,12 @@ mod tests { } #[simd_test(enable = "sse")] - fn test_mm_load1_ps() { + const fn test_mm_load1_ps() { test_mm_load1_ps_impl!(_mm_load1_ps); } 
#[simd_test(enable = "sse")] - fn test_mm_load_ps1() { + const fn test_mm_load_ps1() { test_mm_load1_ps_impl!(_mm_load_ps1); } @@ -3276,12 +3276,12 @@ mod tests { } #[simd_test(enable = "sse")] - fn test_mm_store1_ps() { + const fn test_mm_store1_ps() { test_mm_store1_ps_impl!(_mm_store1_ps); } #[simd_test(enable = "sse")] - fn test_mm_store_ps1() { + const fn test_mm_store_ps1() { test_mm_store1_ps_impl!(_mm_store_ps1); } diff --git a/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs b/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs index 754be9dc39f95..024a873de16e4 100644 --- a/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs +++ b/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs @@ -250,7 +250,6 @@ fn verify_all_signatures() { "_mm_cvt_ss2si", "_mm_cvtt_ss2si", "_mm_cvt_si2ss", - "_mm_set_ps1", "_mm_bslli_si128", "_mm_bsrli_si128", "_bextr2_u32", From 8e815f9ae19ee3236bdc2bd552447dc5a900ef4b Mon Sep 17 00:00:00 2001 From: The rustc-josh-sync Cronjob Bot Date: Tue, 31 Mar 2026 15:48:28 +0000 Subject: [PATCH 13/64] Prepare for merging from rust-lang/rust This updates the rust-version file to e4fdb554ad2c0270473181438e338c42b5b30b0c. 
--- library/stdarch/rust-version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/stdarch/rust-version b/library/stdarch/rust-version index db9492636f6ac..a8efb5c477c1f 100644 --- a/library/stdarch/rust-version +++ b/library/stdarch/rust-version @@ -1 +1 @@ -eda4fc7733ee89e484d7120cafbd80dcb2fce66e +e4fdb554ad2c0270473181438e338c42b5b30b0c From 1276db57450cb127221004625bcb7acb27022057 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Ber=C3=A1nek?= Date: Wed, 1 Apr 2026 11:05:53 +0200 Subject: [PATCH 14/64] Fix rustc-pull CI workflow --- library/stdarch/.github/workflows/rustc-pull.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/library/stdarch/.github/workflows/rustc-pull.yml b/library/stdarch/.github/workflows/rustc-pull.yml index ee0c498878f42..d2feb1add6344 100644 --- a/library/stdarch/.github/workflows/rustc-pull.yml +++ b/library/stdarch/.github/workflows/rustc-pull.yml @@ -13,6 +13,7 @@ jobs: uses: rust-lang/josh-sync/.github/workflows/rustc-pull.yml@main with: github-app-id: ${{ vars.APP_CLIENT_ID }} + pr-author: "workflows-stdarch[bot]" # https://rust-lang.zulipchat.com/#narrow/channel/208962-t-libs.2Fstdarch/topic/Subtree.20sync.20automation/with/528461782 zulip-stream-id: 208962 zulip-bot-email: "stdarch-ci-bot@rust-lang.zulipchat.com" From c4f14ad560d6b821c1e30e8f75eefb8e90256ad9 Mon Sep 17 00:00:00 2001 From: xizheyin Date: Thu, 2 Apr 2026 03:25:08 +0800 Subject: [PATCH 15/64] Avoid suggest format string field access for braced paths --- .../rustc_resolve/src/late/diagnostics.rs | 15 ++++++---- ...ugg-field-in-format-string-issue-141136.rs | 1 + ...field-in-format-string-issue-141136.stderr | 28 ++++++++++++++++--- ...e-with-name-similar-to-struct-field.stderr | 5 +++- 4 files changed, 38 insertions(+), 11 deletions(-) diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs index cf048231bd607..7ced2f579684f 100644 --- a/compiler/rustc_resolve/src/late/diagnostics.rs +++ 
b/compiler/rustc_resolve/src/late/diagnostics.rs @@ -26,7 +26,7 @@ use rustc_middle::ty; use rustc_session::{Session, lint}; use rustc_span::edit_distance::{edit_distance, find_best_match_for_name}; use rustc_span::edition::Edition; -use rustc_span::{DUMMY_SP, Ident, Span, Symbol, kw, sym}; +use rustc_span::{DUMMY_SP, DesugaringKind, Ident, Span, Symbol, kw, sym}; use thin_vec::ThinVec; use tracing::debug; @@ -980,12 +980,15 @@ impl<'ast, 'ra, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { AssocSuggestion::Field(field_span) => { if self_is_available { let source_map = self.r.tcx.sess.source_map(); - // check if the field is used in a format string, such as `"{x}"` - let field_is_format_named_arg = source_map + let field_is_format_named_arg = matches!( + span.desugaring_kind(), + Some(DesugaringKind::FormatLiteral { .. }) + ) && source_map .span_to_source(span, |s, start, _| { - Ok(s.get(start - 1..start) == Some("{")) - }); - if let Ok(true) = field_is_format_named_arg { + Ok(s.get(start.saturating_sub(1)..start) == Some("{")) + }) + .unwrap_or(false); + if field_is_format_named_arg { err.help( format!("you might have meant to use the available field in a format string: `\"{{}}\", self.{}`", segment.ident.name), ); diff --git a/tests/ui/resolve/suggestions/sugg-field-in-format-string-issue-141136.rs b/tests/ui/resolve/suggestions/sugg-field-in-format-string-issue-141136.rs index d2aa61186bcd0..f29ec4bfe7dd8 100644 --- a/tests/ui/resolve/suggestions/sugg-field-in-format-string-issue-141136.rs +++ b/tests/ui/resolve/suggestions/sugg-field-in-format-string-issue-141136.rs @@ -9,6 +9,7 @@ impl Foo { let _ = format!("{ x}"); //~ ERROR invalid format string: expected `}`, found `x` let _ = format!("{}", x); //~ ERROR cannot find value `x` in this scope [E0425] println!("{x}"); //~ ERROR cannot find value `x` in this scope [E0425] + let _ = {x}; //~ERROR cannot find value `x` in this scope [E0425] } } diff --git 
a/tests/ui/resolve/suggestions/sugg-field-in-format-string-issue-141136.stderr b/tests/ui/resolve/suggestions/sugg-field-in-format-string-issue-141136.stderr index 0a84848081d5c..c0e3f2ee5ddb2 100644 --- a/tests/ui/resolve/suggestions/sugg-field-in-format-string-issue-141136.stderr +++ b/tests/ui/resolve/suggestions/sugg-field-in-format-string-issue-141136.stderr @@ -14,7 +14,10 @@ error[E0425]: cannot find value `x` in this scope LL | let _ = format!("{x}"); | ^ | - = help: you might have meant to use the available field in a format string: `"{}", self.x` +help: you might have meant to use the available field + | +LL | let _ = format!("{self.x}"); + | +++++ error[E0425]: cannot find value `x` in this scope --> $DIR/sugg-field-in-format-string-issue-141136.rs:8:27 @@ -22,7 +25,10 @@ error[E0425]: cannot find value `x` in this scope LL | let _ = format!("{x }"); | ^^ | - = help: you might have meant to use the available field in a format string: `"{}", self.x` +help: you might have meant to use the available field + | +LL | let _ = format!("{self.x }"); + | +++++ error[E0425]: cannot find value `x` in this scope --> $DIR/sugg-field-in-format-string-issue-141136.rs:10:31 @@ -41,8 +47,22 @@ error[E0425]: cannot find value `x` in this scope LL | println!("{x}"); | ^ | - = help: you might have meant to use the available field in a format string: `"{}", self.x` +help: you might have meant to use the available field + | +LL | println!("{self.x}"); + | +++++ + +error[E0425]: cannot find value `x` in this scope + --> $DIR/sugg-field-in-format-string-issue-141136.rs:12:18 + | +LL | let _ = {x}; + | ^ + | +help: you might have meant to use the available field + | +LL | let _ = {self.x}; + | +++++ -error: aborting due to 5 previous errors +error: aborting due to 6 previous errors For more information about this error, try `rustc --explain E0425`. 
diff --git a/tests/ui/resolve/typo-suggestion-for-variable-with-name-similar-to-struct-field.stderr b/tests/ui/resolve/typo-suggestion-for-variable-with-name-similar-to-struct-field.stderr index 9c874d980cbe1..e7dcf2fdfe96d 100644 --- a/tests/ui/resolve/typo-suggestion-for-variable-with-name-similar-to-struct-field.stderr +++ b/tests/ui/resolve/typo-suggestion-for-variable-with-name-similar-to-struct-field.stderr @@ -34,7 +34,10 @@ error[E0425]: cannot find value `config` in this scope LL | println!("{config}"); | ^^^^^^ | - = help: you might have meant to use the available field in a format string: `"{}", self.config` +help: you might have meant to use the available field + | +LL | println!("{self.config}"); + | +++++ help: a local variable with a similar name exists | LL - println!("{config}"); From 598640b10192fc0c7c0fe3e427c97bd880ee31e1 Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Mon, 6 Apr 2026 19:13:30 +0200 Subject: [PATCH 16/64] disable hexagon tests for now --- library/stdarch/.github/workflows/main.yml | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/library/stdarch/.github/workflows/main.yml b/library/stdarch/.github/workflows/main.yml index 3749ed1f6ac81..966ba0ba9e96a 100644 --- a/library/stdarch/.github/workflows/main.yml +++ b/library/stdarch/.github/workflows/main.yml @@ -96,8 +96,9 @@ jobs: os: ubuntu-latest - tuple: loongarch64-unknown-linux-gnu os: ubuntu-latest - - tuple: hexagon-unknown-linux-musl - os: ubuntu-latest + # hexagon doesn't build at the moment due to a libc issue. + # - tuple: hexagon-unknown-linux-musl + # os: ubuntu-latest - tuple: wasm32-wasip1 os: ubuntu-latest @@ -209,11 +210,12 @@ jobs: tuple: amdgcn-amd-amdhsa os: ubuntu-latest norun: true - - target: - tuple: hexagon-unknown-linux-musl - os: ubuntu-latest - norun: true - build_std: true + # hexagon doesn't build at the moment due to a libc issue. 
+ # - target: + # tuple: hexagon-unknown-linux-musl + # os: ubuntu-latest + # norun: true + # build_std: true steps: - uses: actions/checkout@v6 From 8f3850c74cce34a0662bae21fea0e415ead4b985 Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Thu, 19 Mar 2026 15:08:04 +0100 Subject: [PATCH 17/64] check that store/load rountrip initializes all bytes --- library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs index 2fbd2255aa0fd..29a278b80df84 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs @@ -1022,8 +1022,12 @@ mod tests { ($elem_ty:ty, $len:expr, $vec_ty:ty, $store:expr, $load:expr) => { let vals: [$elem_ty; $len] = crate::array::from_fn(|i| i as $elem_ty); let a: $vec_ty = transmute(vals); - let mut tmp = [0 as $elem_ty; $len]; + let mut tmp = core::mem::MaybeUninit::<[$elem_ty; $len]>::uninit(); $store(tmp.as_mut_ptr().cast(), a); + + // With Miri this will check that all elements were initialized. 
+ let tmp = tmp.assume_init(); + let r: $vec_ty = $load(tmp.as_ptr().cast()); let out: [$elem_ty; $len] = transmute(r); assert_eq!(out, vals); From cb0d495daed94c1b68aca01081be183271ec061c Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Thu, 19 Mar 2026 15:38:10 +0100 Subject: [PATCH 18/64] support roundtrip of `vst3q` --- .../core_arch/src/aarch64/neon/generated.rs | 18 +---- .../src/arm_shared/neon/generated.rs | 72 +++---------------- .../spec/neon/aarch64.spec.yml | 13 +--- .../spec/neon/arm_shared.spec.yml | 29 +++----- 4 files changed, 22 insertions(+), 110 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs index 74af50016690b..34303b706c526 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs @@ -12127,14 +12127,7 @@ pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2f64.p0" - )] - fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t; - } - _vld3q_f64(a as _) + crate::core_arch::macros::deinterleaving_load!(f64, 2, 3, a) } #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"] @@ -12145,14 +12138,7 @@ pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = 
"llvm.aarch64.neon.ld3.v2i64.p0" - )] - fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t; - } - _vld3q_s64(a as _) + crate::core_arch::macros::deinterleaving_load!(i64, 2, 3, a) } #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"] diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs index 4a846e2877462..cf4d10162ec9a 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs @@ -67158,14 +67158,7 @@ pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v2f32.p0" - )] - fn _vst3_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, ptr: *mut i8); - } - _vst3_f32(b.0, b.1, b.2, a as _) + crate::core_arch::macros::interleaving_store!(f32, 2, 3, a, b) } #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] @@ -67177,14 +67170,7 @@ pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v4f32.p0" - )] - fn _vst3q_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, ptr: *mut i8); - } - _vst3q_f32(b.0, b.1, b.2, a as _) + 
crate::core_arch::macros::interleaving_store!(f32, 4, 3, a, b) } #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] @@ -67196,14 +67182,7 @@ pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v8i8.p0" - )] - fn _vst3_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, ptr: *mut i8); - } - _vst3_s8(b.0, b.1, b.2, a as _) + crate::core_arch::macros::interleaving_store!(i8, 8, 3, a, b) } #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] @@ -67215,14 +67194,7 @@ pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v16i8.p0" - )] - fn _vst3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, ptr: *mut i8); - } - _vst3q_s8(b.0, b.1, b.2, a as _) + crate::core_arch::macros::interleaving_store!(i8, 16, 3, a, b) } #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] @@ -67234,14 +67206,7 @@ pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = 
"aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v4i16.p0" - )] - fn _vst3_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, ptr: *mut i8); - } - _vst3_s16(b.0, b.1, b.2, a as _) + crate::core_arch::macros::interleaving_store!(i16, 4, 3, a, b) } #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] @@ -67253,14 +67218,7 @@ pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v8i16.p0" - )] - fn _vst3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, ptr: *mut i8); - } - _vst3q_s16(b.0, b.1, b.2, a as _) + crate::core_arch::macros::interleaving_store!(i16, 8, 3, a, b) } #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] @@ -67272,14 +67230,7 @@ pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v2i32.p0" - )] - fn _vst3_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, ptr: *mut i8); - } - _vst3_s32(b.0, b.1, b.2, a as _) + crate::core_arch::macros::interleaving_store!(i32, 2, 3, a, b) } #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] @@ -67291,14 +67242,7 @@ pub unsafe fn vst3_s32(a: *mut i32, b: 
int32x2x3_t) { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v4i32.p0" - )] - fn _vst3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, ptr: *mut i8); - } - _vst3q_s32(b.0, b.1, b.2, a as _) + crate::core_arch::macros::interleaving_store!(i32, 4, 3, a, b) } #[doc = "Store multiple 3-element structures from three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f16)"] diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml index e88860717b6df..dfcdfb59a5c1a 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml @@ -4031,17 +4031,10 @@ intrinsics: unsafe: [neon] assert_instr: [ld3] types: - - ['*const i64', int64x2x3_t, '*const int64x2_t', i64] - - ['*const f64', float64x2x3_t, '*const float64x2_t', f64] + - ['*const i64', int64x2x3_t, i64, "2"] + - ['*const f64', float64x2x3_t, f64, "2"] compose: - - LLVMLink: - name: 'vld3{neon_type[1].nox}' - arguments: - - 'ptr: {type[2]}' - links: - - link: 'llvm.aarch64.neon.ld3.v{neon_type[1].lane}{type[3]}.p0' - arch: aarch64,arm64ec - - FnCall: ['_vld3{neon_type[1].nox}', ['a as _']] + - FnCall: ["crate::core_arch::macros::deinterleaving_load!", [{ Type: "{type[2]}" }, "{type[3]}", "3", a], [], true] - name: "vld3{neon_type[1].nox}" doc: Load multiple 3-element structures to three registers diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index 56b2252c9ef0c..f6ef7f17d73b7 100644 --- 
a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -5642,27 +5642,16 @@ intrinsics: safety: unsafe: [neon] types: - - [i8, int8x8x3_t, int8x8_t] - - [i16, int16x4x3_t, int16x4_t] - - [i32, int32x2x3_t, int32x2_t] - - [i8, int8x16x3_t, int8x16_t] - - [i16, int16x8x3_t, int16x8_t] - - [i32, int32x4x3_t, int32x4_t] - - [f32, float32x2x3_t, float32x2_t] - - [f32, float32x4x3_t, float32x4_t] + - [i8, int8x8x3_t, "8"] + - [i16, int16x4x3_t, "4"] + - [i32, int32x2x3_t, "2"] + - [i8, int8x16x3_t, "16"] + - [i16, int16x8x3_t, "8"] + - [i32, int32x4x3_t, "4"] + - [f32, float32x2x3_t, "2"] + - [f32, float32x4x3_t, "4"] compose: - - LLVMLink: - name: 'vst3.{neon_type[1]}' - arguments: - - 'a: {type[2]}' - - 'b: {type[2]}' - - 'c: {type[2]}' - - 'ptr: *mut i8' - links: - - link: 'llvm.aarch64.neon.st3.v{neon_type[1].lane}{type[0]}.p0' - arch: aarch64,arm64ec - - FnCall: ['_vst3{neon_type[1].nox}', ['b.0', 'b.1', 'b.2', 'a as _']] - + - FnCall: ["crate::core_arch::macros::interleaving_store!", [{ Type: "{type[0]}" }, "{type[2]}", "3", a, b], [], true] - name: "vst3{neon_type[1].nox}" doc: "Store multiple 3-element structures from three registers" From 4b52401221a25d72afb1abd40eba9b0d543343e4 Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Thu, 19 Mar 2026 15:46:27 +0100 Subject: [PATCH 19/64] run `test_vld3q` tests with miri on CI --- library/stdarch/.github/workflows/main.yml | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/library/stdarch/.github/workflows/main.yml b/library/stdarch/.github/workflows/main.yml index 966ba0ba9e96a..5520bc9b972cf 100644 --- a/library/stdarch/.github/workflows/main.yml +++ b/library/stdarch/.github/workflows/main.yml @@ -272,7 +272,7 @@ jobs: intrinsic-test: needs: [style] name: Intrinsic Test - runs-on: ubuntu-latest + runs-on: ubuntu-latest strategy: matrix: target: @@ -332,11 +332,30 @@ jobs: cargo run 
-p stdarch-gen-hexagon --release git diff --exit-code + # Run some tests with Miri. Most stdarch functions use platform-specific intrinsics + # that Miri does not support. Also Miri is reltively slow. + # + # Below we run some tests where Miri might catch UB, for instance on intrinsics that read from + # or write to pointers. + miri: + needs: [style] + name: Run some tests with miri + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - name: Install Rust + run: rustup update nightly && rustup default nightly && rustup component add miri + - name: Aarch64 load/store roundtrip + env: + TARGET: "aarch64-unknown-linux-gnu" + run: cargo miri test -p core_arch --target aarch64-unknown-linux-gnu -- test_vld3q + conclusion: needs: - docs - verify - test + - miri - intrinsic-test - check-stdarch-gen runs-on: ubuntu-latest From 8e2069e76c8c40bf80863d4e4ad3da287e3efb68 Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Tue, 31 Mar 2026 23:11:51 +0200 Subject: [PATCH 20/64] run some aarch64 tests with miri on CI --- library/stdarch/.github/workflows/main.yml | 7 +++++-- library/stdarch/aarch64-miri-tests.txt | 3 +++ 2 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 library/stdarch/aarch64-miri-tests.txt diff --git a/library/stdarch/.github/workflows/main.yml b/library/stdarch/.github/workflows/main.yml index 5520bc9b972cf..1f598f6e20d4c 100644 --- a/library/stdarch/.github/workflows/main.yml +++ b/library/stdarch/.github/workflows/main.yml @@ -345,10 +345,13 @@ jobs: - uses: actions/checkout@v6 - name: Install Rust run: rustup update nightly && rustup default nightly && rustup component add miri - - name: Aarch64 load/store roundtrip + - name: Run miri tests env: TARGET: "aarch64-unknown-linux-gnu" - run: cargo miri test -p core_arch --target aarch64-unknown-linux-gnu -- test_vld3q + run: | + # read filters and join them with a space. 
+ FILTERS=$(cat aarch64-miri-tests.txt | tr '\n' ' ') + cargo miri test -p core_arch --target aarch64-unknown-linux-gnu -- $FILTERS conclusion: needs: diff --git a/library/stdarch/aarch64-miri-tests.txt b/library/stdarch/aarch64-miri-tests.txt new file mode 100644 index 0000000000000..2c66cc5eea771 --- /dev/null +++ b/library/stdarch/aarch64-miri-tests.txt @@ -0,0 +1,3 @@ +test_vld3q +neon::load_tests +neon::store_tests From 548d790dd8390718a6fe96c8e1a047f9a86cedb1 Mon Sep 17 00:00:00 2001 From: WANG Rui Date: Tue, 31 Mar 2026 09:40:38 +0800 Subject: [PATCH 21/64] loongarch: Remove unnecessary `transmute` calls This commit removes unnecessary `transmute` calls to resolve clippy warnings. --- .../core_arch/src/loongarch64/lasx/generated.rs | 12 ++++++------ .../core_arch/src/loongarch64/lsx/generated.rs | 12 ++++++------ .../stdarch/crates/stdarch-gen-loongarch/src/main.rs | 6 +++--- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs index d2e1a87fde46f..5559c6ad4d0e8 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs @@ -5044,7 +5044,7 @@ pub unsafe fn lasx_xvld(mem_addr: *const i8) -> m256i { #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn lasx_xvst(a: m256i, mem_addr: *mut i8) { static_assert_simm_bits!(IMM_S12, 12); - transmute(__lasx_xvst(transmute(a), mem_addr, IMM_S12)) + __lasx_xvst(transmute(a), mem_addr, IMM_S12) } #[inline(always)] @@ -5054,7 +5054,7 @@ pub unsafe fn lasx_xvst(a: m256i, mem_addr: *mut i8) { pub unsafe fn lasx_xvstelm_b(a: m256i, mem_addr: *mut i8) { static_assert_simm_bits!(IMM_S8, 8); static_assert_uimm_bits!(IMM4, 4); - transmute(__lasx_xvstelm_b(transmute(a), mem_addr, IMM_S8, IMM4)) + __lasx_xvstelm_b(transmute(a), mem_addr, IMM_S8, IMM4) } #[inline(always)] @@ 
-5064,7 +5064,7 @@ pub unsafe fn lasx_xvstelm_b(a: m256i, mem_a pub unsafe fn lasx_xvstelm_h(a: m256i, mem_addr: *mut i8) { static_assert_simm_bits!(IMM_S8, 8); static_assert_uimm_bits!(IMM3, 3); - transmute(__lasx_xvstelm_h(transmute(a), mem_addr, IMM_S8, IMM3)) + __lasx_xvstelm_h(transmute(a), mem_addr, IMM_S8, IMM3) } #[inline(always)] @@ -5074,7 +5074,7 @@ pub unsafe fn lasx_xvstelm_h(a: m256i, mem_a pub unsafe fn lasx_xvstelm_w(a: m256i, mem_addr: *mut i8) { static_assert_simm_bits!(IMM_S8, 8); static_assert_uimm_bits!(IMM2, 2); - transmute(__lasx_xvstelm_w(transmute(a), mem_addr, IMM_S8, IMM2)) + __lasx_xvstelm_w(transmute(a), mem_addr, IMM_S8, IMM2) } #[inline(always)] @@ -5084,7 +5084,7 @@ pub unsafe fn lasx_xvstelm_w(a: m256i, mem_a pub unsafe fn lasx_xvstelm_d(a: m256i, mem_addr: *mut i8) { static_assert_simm_bits!(IMM_S8, 8); static_assert_uimm_bits!(IMM1, 1); - transmute(__lasx_xvstelm_d(transmute(a), mem_addr, IMM_S8, IMM1)) + __lasx_xvstelm_d(transmute(a), mem_addr, IMM_S8, IMM1) } #[inline(always)] @@ -5192,7 +5192,7 @@ pub unsafe fn lasx_xvldx(mem_addr: *const i8, b: i64) -> m256i { #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn lasx_xvstx(a: m256i, mem_addr: *mut i8, b: i64) { - transmute(__lasx_xvstx(transmute(a), mem_addr, transmute(b))) + __lasx_xvstx(transmute(a), mem_addr, transmute(b)) } #[inline(always)] diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs index 679c82079cb88..faa8859eba777 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs @@ -5052,7 +5052,7 @@ pub fn lsx_vfrintrm_d(a: m128d) -> m128d { pub unsafe fn lsx_vstelm_b(a: m128i, mem_addr: *mut i8) { static_assert_simm_bits!(IMM_S8, 8); static_assert_uimm_bits!(IMM4, 4); - transmute(__lsx_vstelm_b(transmute(a), mem_addr, IMM_S8, 
IMM4)) + __lsx_vstelm_b(transmute(a), mem_addr, IMM_S8, IMM4) } #[inline(always)] @@ -5062,7 +5062,7 @@ pub unsafe fn lsx_vstelm_b(a: m128i, mem_add pub unsafe fn lsx_vstelm_h(a: m128i, mem_addr: *mut i8) { static_assert_simm_bits!(IMM_S8, 8); static_assert_uimm_bits!(IMM3, 3); - transmute(__lsx_vstelm_h(transmute(a), mem_addr, IMM_S8, IMM3)) + __lsx_vstelm_h(transmute(a), mem_addr, IMM_S8, IMM3) } #[inline(always)] @@ -5072,7 +5072,7 @@ pub unsafe fn lsx_vstelm_h(a: m128i, mem_add pub unsafe fn lsx_vstelm_w(a: m128i, mem_addr: *mut i8) { static_assert_simm_bits!(IMM_S8, 8); static_assert_uimm_bits!(IMM2, 2); - transmute(__lsx_vstelm_w(transmute(a), mem_addr, IMM_S8, IMM2)) + __lsx_vstelm_w(transmute(a), mem_addr, IMM_S8, IMM2) } #[inline(always)] @@ -5082,7 +5082,7 @@ pub unsafe fn lsx_vstelm_w(a: m128i, mem_add pub unsafe fn lsx_vstelm_d(a: m128i, mem_addr: *mut i8) { static_assert_simm_bits!(IMM_S8, 8); static_assert_uimm_bits!(IMM1, 1); - transmute(__lsx_vstelm_d(transmute(a), mem_addr, IMM_S8, IMM1)) + __lsx_vstelm_d(transmute(a), mem_addr, IMM_S8, IMM1) } #[inline(always)] @@ -6376,7 +6376,7 @@ pub unsafe fn lsx_vld(mem_addr: *const i8) -> m128i { #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn lsx_vst(a: m128i, mem_addr: *mut i8) { static_assert_simm_bits!(IMM_S12, 12); - transmute(__lsx_vst(transmute(a), mem_addr, IMM_S12)) + __lsx_vst(transmute(a), mem_addr, IMM_S12) } #[inline(always)] @@ -6455,7 +6455,7 @@ pub unsafe fn lsx_vldx(mem_addr: *const i8, b: i64) -> m128i { #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn lsx_vstx(a: m128i, mem_addr: *mut i8, b: i64) { - transmute(__lsx_vstx(transmute(a), mem_addr, transmute(b))) + __lsx_vstx(transmute(a), mem_addr, transmute(b)) } #[inline(always)] diff --git a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs index fdc9b2b6bcabe..fe767fc30917d 100644 --- 
a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs +++ b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs @@ -571,21 +571,21 @@ fn gen_bind_body( } else if para_num == 3 && in_t[1] == "CVPOINTER" && in_t[2] == "SI" { call_params = match asm_fmts[2].as_str() { "si12" => format!( - "static_assert_simm_bits!(IMM_S12, 12);\n {unsafe_start}transmute(__{current_name}(transmute(a), mem_addr, IMM_S12)){unsafe_end}" + "static_assert_simm_bits!(IMM_S12, 12);\n {unsafe_start}__{current_name}(transmute(a), mem_addr, IMM_S12){unsafe_end}" ), _ => panic!("unsupported assembly format: {}", asm_fmts[2]), }; } else if para_num == 3 && in_t[1] == "CVPOINTER" && in_t[2] == "DI" { call_params = match asm_fmts[2].as_str() { "rk" => format!( - "{unsafe_start}transmute(__{current_name}(transmute(a), mem_addr, transmute(b))){unsafe_end}" + "{unsafe_start}__{current_name}(transmute(a), mem_addr, transmute(b)){unsafe_end}" ), _ => panic!("unsupported assembly format: {}", asm_fmts[2]), }; } else if para_num == 4 { call_params = match (asm_fmts[2].as_str(), current_name.chars().last().unwrap()) { ("si8", t) => format!( - "static_assert_simm_bits!(IMM_S8, 8);\n static_assert_uimm_bits!(IMM{0}, {0});\n {unsafe_start}transmute(__{current_name}(transmute(a), mem_addr, IMM_S8, IMM{0})){unsafe_end}", + "static_assert_simm_bits!(IMM_S8, 8);\n static_assert_uimm_bits!(IMM{0}, {0});\n {unsafe_start}__{current_name}(transmute(a), mem_addr, IMM_S8, IMM{0}){unsafe_end}", type_to_imm(t) ), (_, _) => panic!( From 5a44da6067e49789a4a1ba9080ae2e5b0dacd82b Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Wed, 8 Apr 2026 10:44:44 +0200 Subject: [PATCH 22/64] make `vld4q` portable --- library/stdarch/aarch64-miri-tests.txt | 3 ++- .../core_arch/src/aarch64/neon/generated.rs | 18 ++---------------- .../stdarch-gen-arm/spec/neon/aarch64.spec.yml | 14 ++++---------- 3 files changed, 8 insertions(+), 27 deletions(-) diff --git a/library/stdarch/aarch64-miri-tests.txt 
b/library/stdarch/aarch64-miri-tests.txt index 2c66cc5eea771..2c0dbb8297a1f 100644 --- a/library/stdarch/aarch64-miri-tests.txt +++ b/library/stdarch/aarch64-miri-tests.txt @@ -1,3 +1,4 @@ -test_vld3q +test_vld3 +test_vld4 neon::load_tests neon::store_tests diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs index 34303b706c526..c9ce7a69a6578 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs @@ -12521,14 +12521,7 @@ pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2f64.p0" - )] - fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t; - } - _vld4q_f64(a as _) + crate::core_arch::macros::deinterleaving_load!(f64, 2, 4, a) } #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"] @@ -12539,14 +12532,7 @@ pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2i64.p0" - )] - fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t; - } - _vld4q_s64(a as _) + crate::core_arch::macros::deinterleaving_load!(i64, 2, 4, a) } #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"] diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml index dfcdfb59a5c1a..a769d352649c9 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml @@ -4167,17 +4167,11 @@ intrinsics: safety: unsafe: [neon] types: - - ['*const f64', float64x2x4_t, f64, '*const float64x2_t'] - - ['*const i64', int64x2x4_t, i64, '*const int64x2_t'] + - ['*const f64', float64x2x4_t, f64, "2"] + - ['*const i64', int64x2x4_t, i64, "2"] compose: - - LLVMLink: - name: 'vld4{neon_type[1].nox}' - arguments: - - 'ptr: {type[3]}' - links: - - link: 'llvm.aarch64.neon.ld4.v{neon_type[1].lane}{type[2]}.p0' - arch: aarch64,arm64ec - - FnCall: ['_vld4{neon_type[1].nox}', ['a as _']] + - FnCall: ["crate::core_arch::macros::deinterleaving_load!", [{ Type: "{type[2]}" }, "{type[3]}", "4", a], [], true] + - name: "vld4{neon_type[1].nox}" doc: Load multiple 4-element structures to four registers From f8879e326f7da30a931995296c83ed89f9a4563f Mon Sep 17 00:00:00 2001 From: Brian Cain Date: Wed, 8 Apr 2026 09:52:49 -0700 Subject: [PATCH 23/64] hexagon: Preserve original Q6 naming case for HVX intrinsics --- .../crates/core_arch/src/hexagon/v128.rs | 965 +++++++++--------- .../crates/core_arch/src/hexagon/v64.rs | 965 +++++++++--------- .../crates/stdarch-gen-hexagon/src/main.rs | 23 +- library/stdarch/examples/gaussian.rs | 54 +- 4 files changed, 1020 insertions(+), 987 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/hexagon/v128.rs b/library/stdarch/crates/core_arch/src/hexagon/v128.rs index ef7ff4205c71d..10263382938b6 100644 --- a/library/stdarch/crates/core_arch/src/hexagon/v128.rs +++ b/library/stdarch/crates/core_arch/src/hexagon/v128.rs @@ -15,6 +15,18 @@ //! //! 
To use this module, compile with `-C target-feature=+hvx-length128b`. //! +//! ## Naming Convention +//! +//! Function names preserve the original Q6 naming case because the convention +//! uses case to distinguish register types: +//! - `W` (uppercase) = vector pair (`HvxVectorPair`) +//! - `V` (uppercase) = vector (`HvxVector`) +//! - `Q` (uppercase) = predicate (`HvxVectorPred`) +//! - `R` = scalar register (`i32`) +//! +//! For example, `Q6_W_vcombine_VV` operates on a vector pair while +//! `Q6_V_hi_W` extracts a vector from a pair. +//! //! ## Architecture Versions //! //! Different intrinsics require different HVX architecture versions. Use the @@ -31,6 +43,7 @@ //! Each version includes all features from previous versions. #![allow(non_camel_case_types)] +#![allow(non_snake_case)] #[cfg(test)] use stdarch_test::assert_instr; @@ -1034,7 +1047,7 @@ unsafe extern "unadjusted" { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(extractw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_r_vextract_vr(vu: HvxVector, rs: i32) -> i32 { +pub unsafe fn Q6_R_vextract_VR(vu: HvxVector, rs: i32) -> i32 { extractw(vu, rs) } @@ -1046,7 +1059,7 @@ pub unsafe fn q6_r_vextract_vr(vu: HvxVector, rs: i32) -> i32 { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(hi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_hi_w(vss: HvxVectorPair) -> HvxVector { +pub unsafe fn Q6_V_hi_W(vss: HvxVectorPair) -> HvxVector { hi(vss) } @@ -1058,7 +1071,7 @@ pub unsafe fn q6_v_hi_w(vss: HvxVectorPair) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(lo))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_lo_w(vss: HvxVectorPair) -> HvxVector { +pub unsafe fn Q6_V_lo_W(vss: HvxVectorPair) -> HvxVector { lo(vss) } @@ -1070,7 +1083,7 
@@ pub unsafe fn q6_v_lo_w(vss: HvxVectorPair) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(lvsplatw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vsplat_r(rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vsplat_R(rt: i32) -> HvxVector { lvsplatw(rt) } @@ -1082,7 +1095,7 @@ pub unsafe fn q6_v_vsplat_r(rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsdiffh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vabsdiff_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vabsdiff_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vabsdiffh(vu, vv) } @@ -1094,7 +1107,7 @@ pub unsafe fn q6_vuh_vabsdiff_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsdiffub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vabsdiff_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vabsdiff_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { vabsdiffub(vu, vv) } @@ -1106,7 +1119,7 @@ pub unsafe fn q6_vub_vabsdiff_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsdiffuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vabsdiff_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vabsdiff_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { vabsdiffuh(vu, vv) } @@ -1118,7 +1131,7 @@ pub unsafe fn q6_vuh_vabsdiff_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsdiffw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub 
unsafe fn q6_vuw_vabsdiff_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuw_vabsdiff_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vabsdiffw(vu, vv) } @@ -1130,7 +1143,7 @@ pub unsafe fn q6_vuw_vabsdiff_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vabs_vh(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vabs_Vh(vu: HvxVector) -> HvxVector { vabsh(vu) } @@ -1142,7 +1155,7 @@ pub unsafe fn q6_vh_vabs_vh(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsh_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vabs_vh_sat(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vabs_Vh_sat(vu: HvxVector) -> HvxVector { vabsh_sat(vu) } @@ -1154,7 +1167,7 @@ pub unsafe fn q6_vh_vabs_vh_sat(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vabs_vw(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vabs_Vw(vu: HvxVector) -> HvxVector { vabsw(vu) } @@ -1166,7 +1179,7 @@ pub unsafe fn q6_vw_vabs_vw(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsw_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vabs_vw_sat(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vabs_Vw_sat(vu: HvxVector) -> HvxVector { vabsw_sat(vu) } @@ -1178,7 +1191,7 @@ pub unsafe fn q6_vw_vabs_vw_sat(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddb))] #[unstable(feature = "stdarch_hexagon", issue = 
"151523")] -pub unsafe fn q6_vb_vadd_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vadd_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vaddb(vu, vv) } @@ -1190,7 +1203,7 @@ pub unsafe fn q6_vb_vadd_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddb_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wb_vadd_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wb_vadd_WbWb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vaddb_dv(vuu, vvv) } @@ -1202,7 +1215,7 @@ pub unsafe fn q6_wb_vadd_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vadd_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vaddh(vu, vv) } @@ -1214,7 +1227,7 @@ pub unsafe fn q6_vh_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddh_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vadd_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vadd_WhWh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vaddh_dv(vuu, vvv) } @@ -1226,7 +1239,7 @@ pub unsafe fn q6_wh_vadd_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vadd_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vadd_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { 
vaddhsat(vu, vv) } @@ -1238,7 +1251,7 @@ pub unsafe fn q6_vh_vadd_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddhsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vadd_whwh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vadd_WhWh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vaddhsat_dv(vuu, vvv) } @@ -1250,7 +1263,7 @@ pub unsafe fn q6_wh_vadd_whwh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vadd_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vaddhw(vu, vv) } @@ -1262,7 +1275,7 @@ pub unsafe fn q6_ww_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddubh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vadd_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vadd_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vaddubh(vu, vv) } @@ -1274,7 +1287,7 @@ pub unsafe fn q6_wh_vadd_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddubsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vadd_vubvub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vadd_VubVub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vaddubsat(vu, vv) } @@ -1286,7 +1299,7 @@ pub unsafe fn q6_vub_vadd_vubvub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddubsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wub_vadd_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wub_vadd_WubWub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vaddubsat_dv(vuu, vvv) } @@ -1298,7 +1311,7 @@ pub unsafe fn q6_wub_vadd_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vadduhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vadd_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vadd_VuhVuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vadduhsat(vu, vv) } @@ -1310,7 +1323,7 @@ pub unsafe fn q6_vuh_vadd_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vadduhsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuh_vadd_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wuh_vadd_WuhWuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vadduhsat_dv(vuu, vvv) } @@ -1322,7 +1335,7 @@ pub unsafe fn q6_wuh_vadd_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vadduhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vadd_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vadd_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vadduhw(vu, vv) } @@ -1334,7 +1347,7 @@ pub unsafe fn q6_ww_vadd_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] 
#[cfg_attr(test, assert_instr(vaddw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vadd_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vadd_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { simd_add(vu, vv) } @@ -1346,7 +1359,7 @@ pub unsafe fn q6_vw_vadd_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddw_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vadd_wwww(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vadd_WwWw(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vaddw_dv(vuu, vvv) } @@ -1358,7 +1371,7 @@ pub unsafe fn q6_ww_vadd_wwww(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddwsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vadd_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vadd_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vaddwsat(vu, vv) } @@ -1370,7 +1383,7 @@ pub unsafe fn q6_vw_vadd_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddwsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vadd_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vadd_WwWw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vaddwsat_dv(vuu, vvv) } @@ -1382,7 +1395,7 @@ pub unsafe fn q6_ww_vadd_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(valignb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_valign_vvr(vu: 
HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_valign_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { valignb(vu, vv, rt) } @@ -1394,7 +1407,7 @@ pub unsafe fn q6_v_valign_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(valignbi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_valign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { +pub unsafe fn Q6_V_valign_VVI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { valignbi(vu, vv, iu3) } @@ -1406,7 +1419,7 @@ pub unsafe fn q6_v_valign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vand))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vand_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vand_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { simd_and(vu, vv) } @@ -1418,7 +1431,7 @@ pub unsafe fn q6_v_vand_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vasl_vhr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vasl_VhR(vu: HvxVector, rt: i32) -> HvxVector { vaslh(vu, rt) } @@ -1430,7 +1443,7 @@ pub unsafe fn q6_vh_vasl_vhr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vasl_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vasl_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vaslhv(vu, vv) } @@ -1442,7 +1455,7 @@ pub unsafe fn q6_vh_vasl_vhvh(vu: HvxVector, vv: HvxVector) -> 
HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vasl_vwr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vasl_VwR(vu: HvxVector, rt: i32) -> HvxVector { vaslw(vu, rt) } @@ -1454,7 +1467,7 @@ pub unsafe fn q6_vw_vasl_vwr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslw_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vaslacc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vaslacc_VwVwR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vaslw_acc(vx, vu, rt) } @@ -1466,7 +1479,7 @@ pub unsafe fn q6_vw_vaslacc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxV #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslwv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vasl_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vasl_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vaslwv(vu, vv) } @@ -1478,7 +1491,7 @@ pub unsafe fn q6_vw_vasl_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vasr_vhr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vasr_VhR(vu: HvxVector, rt: i32) -> HvxVector { vasrh(vu, rt) } @@ -1490,7 +1503,7 @@ pub unsafe fn q6_vh_vasr_vhr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrhbrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: 
HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vb_vasr_VhVhR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrhbrndsat(vu, vv, rt) } @@ -1502,7 +1515,7 @@ pub unsafe fn q6_vb_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrhubrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vub_vasr_VhVhR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrhubrndsat(vu, vv, rt) } @@ -1514,7 +1527,7 @@ pub unsafe fn q6_vub_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) - #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrhubsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vub_vasr_VhVhR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrhubsat(vu, vv, rt) } @@ -1526,7 +1539,7 @@ pub unsafe fn q6_vub_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> Hv #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vasr_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vasr_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vasrhv(vu, vv) } @@ -1538,7 +1551,7 @@ pub unsafe fn q6_vh_vasr_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vasr_vwr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vasr_VwR(vu: HvxVector, rt: i32) -> HvxVector { vasrw(vu, 
rt) } @@ -1550,7 +1563,7 @@ pub unsafe fn q6_vw_vasr_vwr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrw_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vasracc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vasracc_VwVwR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vasrw_acc(vx, vu, rt) } @@ -1562,7 +1575,7 @@ pub unsafe fn q6_vw_vasracc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxV #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vasr_vwvwr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vasr_VwVwR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrwh(vu, vv, rt) } @@ -1574,7 +1587,7 @@ pub unsafe fn q6_vh_vasr_vwvwr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwhrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vasr_VwVwR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrwhrndsat(vu, vv, rt) } @@ -1586,7 +1599,7 @@ pub unsafe fn q6_vh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vasr_vwvwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vasr_VwVwR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrwhsat(vu, vv, rt) } @@ -1598,7 +1611,7 @@ pub unsafe fn q6_vh_vasr_vwvwr_sat(vu: HvxVector, vv: 
HvxVector, rt: i32) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwuhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vasr_vwvwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuh_vasr_VwVwR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrwuhsat(vu, vv, rt) } @@ -1610,7 +1623,7 @@ pub unsafe fn q6_vuh_vasr_vwvwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> Hv #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vasr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vasr_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vasrwv(vu, vv) } @@ -1622,7 +1635,7 @@ pub unsafe fn q6_vw_vasr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vassign))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_equals_v(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_equals_V(vu: HvxVector) -> HvxVector { vassign(vu) } @@ -1634,7 +1647,7 @@ pub unsafe fn q6_v_equals_v(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vassignp))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_w_equals_w(vuu: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_W_equals_W(vuu: HvxVectorPair) -> HvxVectorPair { vassignp(vuu) } @@ -1646,7 +1659,7 @@ pub unsafe fn q6_w_equals_w(vuu: HvxVectorPair) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub 
unsafe fn Q6_Vh_vavg_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vavgh(vu, vv) } @@ -1658,7 +1671,7 @@ pub unsafe fn q6_vh_vavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavghrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vavg_vhvh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vavg_VhVh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { vavghrnd(vu, vv) } @@ -1670,7 +1683,7 @@ pub unsafe fn q6_vh_vavg_vhvh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vavg_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { vavgub(vu, vv) } @@ -1682,7 +1695,7 @@ pub unsafe fn q6_vub_vavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgubrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vavg_vubvub_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vavg_VubVub_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { vavgubrnd(vu, vv) } @@ -1694,7 +1707,7 @@ pub unsafe fn q6_vub_vavg_vubvub_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavguh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vavg_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vavg_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { vavguh(vu, vv) } @@ -1706,7 +1719,7 @@ pub unsafe fn q6_vuh_vavg_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavguhrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vavg_vuhvuh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vavg_VuhVuh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { vavguhrnd(vu, vv) } @@ -1718,7 +1731,7 @@ pub unsafe fn q6_vuh_vavg_vuhvuh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vavg_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vavgw(vu, vv) } @@ -1730,7 +1743,7 @@ pub unsafe fn q6_vw_vavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgwrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vavg_vwvw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vavg_VwVw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { vavgwrnd(vu, vv) } @@ -1742,7 +1755,7 @@ pub unsafe fn q6_vw_vavg_vwvw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vcl0h))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vcl0_vuh(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vcl0_Vuh(vu: HvxVector) -> HvxVector { vcl0h(vu) } @@ -1754,7 +1767,7 @@ pub unsafe fn q6_vuh_vcl0_vuh(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vcl0w))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vcl0_vuw(vu: HvxVector) -> HvxVector { +pub unsafe fn 
Q6_Vuw_vcl0_Vuw(vu: HvxVector) -> HvxVector { vcl0w(vu) } @@ -1766,7 +1779,7 @@ pub unsafe fn q6_vuw_vcl0_vuw(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vcombine))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_w_vcombine_vv(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_W_vcombine_VV(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vcombine(vu, vv) } @@ -1778,7 +1791,7 @@ pub unsafe fn q6_w_vcombine_vv(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vd0))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vzero() -> HvxVector { +pub unsafe fn Q6_V_vzero() -> HvxVector { vd0() } @@ -1790,7 +1803,7 @@ pub unsafe fn q6_v_vzero() -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdealb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vdeal_vb(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vdeal_Vb(vu: HvxVector) -> HvxVector { vdealb(vu) } @@ -1802,7 +1815,7 @@ pub unsafe fn q6_vb_vdeal_vb(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdealb4w))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vdeale_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vdeale_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vdealb4w(vu, vv) } @@ -1814,7 +1827,7 @@ pub unsafe fn q6_vb_vdeale_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdealh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vdeal_vh(vu: HvxVector) -> HvxVector { +pub unsafe fn 
Q6_Vh_vdeal_Vh(vu: HvxVector) -> HvxVector { vdealh(vu) } @@ -1826,7 +1839,7 @@ pub unsafe fn q6_vh_vdeal_vh(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdealvdd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_w_vdeal_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_W_vdeal_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { vdealvdd(vu, vv, rt) } @@ -1838,7 +1851,7 @@ pub unsafe fn q6_w_vdeal_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdelta))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vdelta_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { vdelta(vu, vv) } @@ -1850,7 +1863,7 @@ pub unsafe fn q6_v_vdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpybus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vdmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vdmpy_VubRb(vu: HvxVector, rt: i32) -> HvxVector { vdmpybus(vu, rt) } @@ -1862,7 +1875,7 @@ pub unsafe fn q6_vh_vdmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpybus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vdmpyacc_vhvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vdmpyacc_VhVubRb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vdmpybus_acc(vx, vu, rt) } @@ -1874,7 +1887,7 @@ pub unsafe fn q6_vh_vdmpyacc_vhvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> H #[cfg_attr(target_arch = 
"hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpybus_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vdmpy_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vdmpy_WubRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vdmpybus_dv(vuu, rt) } @@ -1886,7 +1899,7 @@ pub unsafe fn q6_wh_vdmpy_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpybus_dv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vdmpyacc_whwubrb( +pub unsafe fn Q6_Wh_vdmpyacc_WhWubRb( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -1902,7 +1915,7 @@ pub unsafe fn q6_wh_vdmpyacc_whwubrb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpy_vhrb(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpy_VhRb(vu: HvxVector, rt: i32) -> HvxVector { vdmpyhb(vu, rt) } @@ -1914,7 +1927,7 @@ pub unsafe fn q6_vw_vdmpy_vhrb(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpyacc_vwvhrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpyacc_VwVhRb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vdmpyhb_acc(vx, vu, rt) } @@ -1926,7 +1939,7 @@ pub unsafe fn q6_vw_vdmpyacc_vwvhrb(vx: HvxVector, vu: HvxVector, rt: i32) -> Hv #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhb_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vdmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn 
Q6_Ww_vdmpy_WhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vdmpyhb_dv(vuu, rt) } @@ -1938,7 +1951,7 @@ pub unsafe fn q6_ww_vdmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhb_dv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vdmpyacc_wwwhrb( +pub unsafe fn Q6_Ww_vdmpyacc_WwWhRb( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -1954,7 +1967,7 @@ pub unsafe fn q6_ww_vdmpyacc_wwwhrb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhisat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpy_whrh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpy_WhRh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { vdmpyhisat(vuu, rt) } @@ -1966,7 +1979,7 @@ pub unsafe fn q6_vw_vdmpy_whrh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhisat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpyacc_vwwhrh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpyacc_VwWhRh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector { vdmpyhisat_acc(vx, vuu, rt) } @@ -1978,7 +1991,7 @@ pub unsafe fn q6_vw_vdmpyacc_vwwhrh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpy_vhrh_sat(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpy_VhRh_sat(vu: HvxVector, rt: i32) -> HvxVector { vdmpyhsat(vu, rt) } @@ -1990,7 +2003,7 @@ pub unsafe fn q6_vw_vdmpy_vhrh_sat(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpyacc_vwvhrh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpyacc_VwVhRh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vdmpyhsat_acc(vx, vu, rt) } @@ -2002,7 +2015,7 @@ pub unsafe fn q6_vw_vdmpyacc_vwvhrh_sat(vx: HvxVector, vu: HvxVector, rt: i32) - #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsuisat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpy_whruh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpy_WhRuh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { vdmpyhsuisat(vuu, rt) } @@ -2014,7 +2027,7 @@ pub unsafe fn q6_vw_vdmpy_whruh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsuisat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpyacc_vwwhruh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpyacc_VwWhRuh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector { vdmpyhsuisat_acc(vx, vuu, rt) } @@ -2026,7 +2039,7 @@ pub unsafe fn q6_vw_vdmpyacc_vwwhruh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsusat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpy_vhruh_sat(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpy_VhRuh_sat(vu: HvxVector, rt: i32) -> HvxVector { vdmpyhsusat(vu, rt) } @@ -2038,7 +2051,7 @@ pub unsafe fn q6_vw_vdmpy_vhruh_sat(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, 
assert_instr(vdmpyhsusat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpyacc_vwvhruh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpyacc_VwVhRuh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vdmpyhsusat_acc(vx, vu, rt) } @@ -2050,7 +2063,7 @@ pub unsafe fn q6_vw_vdmpyacc_vwvhruh_sat(vx: HvxVector, vu: HvxVector, rt: i32) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhvsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpy_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpy_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vdmpyhvsat(vu, vv) } @@ -2062,7 +2075,7 @@ pub unsafe fn q6_vw_vdmpy_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhvsat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpyacc_vwvhvh_sat(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpyacc_VwVhVh_sat(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { vdmpyhvsat_acc(vx, vu, vv) } @@ -2074,7 +2087,7 @@ pub unsafe fn q6_vw_vdmpyacc_vwvhvh_sat(vx: HvxVector, vu: HvxVector, vv: HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdsaduh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vdsad_wuhruh(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vdsad_WuhRuh(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vdsaduh(vuu, rt) } @@ -2086,7 +2099,7 @@ pub unsafe fn q6_wuw_vdsad_wuhruh(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdsaduh_acc))] #[unstable(feature = 
"stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vdsadacc_wuwwuhruh( +pub unsafe fn Q6_Wuw_vdsadacc_WuwWuhRuh( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -2102,7 +2115,7 @@ pub unsafe fn q6_wuw_vdsadacc_wuwwuhruh( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vinsertwr))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vinsert_vwr(vx: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vinsert_VwR(vx: HvxVector, rt: i32) -> HvxVector { vinsertwr(vx, rt) } @@ -2114,7 +2127,7 @@ pub unsafe fn q6_vw_vinsert_vwr(vx: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlalignb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vlalign_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vlalign_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vlalignb(vu, vv, rt) } @@ -2126,7 +2139,7 @@ pub unsafe fn q6_v_vlalign_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlalignbi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vlalign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { +pub unsafe fn Q6_V_vlalign_VVI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { vlalignbi(vu, vv, iu3) } @@ -2138,7 +2151,7 @@ pub unsafe fn q6_v_vlalign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlsrh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vlsr_vuhr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuh_vlsr_VuhR(vu: HvxVector, rt: i32) -> HvxVector { vlsrh(vu, rt) } @@ -2150,7 +2163,7 @@ pub unsafe fn q6_vuh_vlsr_vuhr(vu: 
HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlsrhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vlsr_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vlsr_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vlsrhv(vu, vv) } @@ -2162,7 +2175,7 @@ pub unsafe fn q6_vh_vlsr_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlsrw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vlsr_vuwr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuw_vlsr_VuwR(vu: HvxVector, rt: i32) -> HvxVector { vlsrw(vu, rt) } @@ -2174,7 +2187,7 @@ pub unsafe fn q6_vuw_vlsr_vuwr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlsrwv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vlsr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vlsr_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vlsrwv(vu, vv) } @@ -2186,7 +2199,7 @@ pub unsafe fn q6_vw_vlsr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlutvvb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vlut32_vbvbr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vb_vlut32_VbVbR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vlutvvb(vu, vv, rt) } @@ -2198,7 +2211,7 @@ pub unsafe fn q6_vb_vlut32_vbvbr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVe #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlutvvb_oracc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn 
q6_vb_vlut32or_vbvbvbr( +pub unsafe fn Q6_Vb_vlut32or_VbVbVbR( vx: HvxVector, vu: HvxVector, vv: HvxVector, @@ -2215,7 +2228,7 @@ pub unsafe fn q6_vb_vlut32or_vbvbvbr( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlutvwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vlut16_vbvhr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vlut16_VbVhR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { vlutvwh(vu, vv, rt) } @@ -2227,7 +2240,7 @@ pub unsafe fn q6_wh_vlut16_vbvhr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVe #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlutvwh_oracc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vlut16or_whvbvhr( +pub unsafe fn Q6_Wh_vlut16or_WhVbVhR( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -2244,7 +2257,7 @@ pub unsafe fn q6_wh_vlut16or_whvbvhr( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmaxh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmax_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vmax_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vmaxh(vu, vv) } @@ -2256,7 +2269,7 @@ pub unsafe fn q6_vh_vmax_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmaxub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vmax_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vmax_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { vmaxub(vu, vv) } @@ -2268,7 +2281,7 @@ pub unsafe fn q6_vub_vmax_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, 
assert_instr(vmaxuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vmax_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vmax_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { vmaxuh(vu, vv) } @@ -2280,7 +2293,7 @@ pub unsafe fn q6_vuh_vmax_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmaxw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmax_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmax_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vmaxw(vu, vv) } @@ -2292,7 +2305,7 @@ pub unsafe fn q6_vw_vmax_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vminh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmin_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vmin_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vminh(vu, vv) } @@ -2304,7 +2317,7 @@ pub unsafe fn q6_vh_vmin_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vminub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vmin_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vmin_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { vminub(vu, vv) } @@ -2316,7 +2329,7 @@ pub unsafe fn q6_vub_vmin_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vminuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vmin_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vmin_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector 
{ vminuh(vu, vv) } @@ -2328,7 +2341,7 @@ pub unsafe fn q6_vuh_vmin_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vminw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmin_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmin_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vminw(vu, vv) } @@ -2340,7 +2353,7 @@ pub unsafe fn q6_vw_vmin_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpabus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpa_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vmpa_WubRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vmpabus(vuu, rt) } @@ -2352,7 +2365,7 @@ pub unsafe fn q6_wh_vmpa_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpabus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpaacc_whwubrb( +pub unsafe fn Q6_Wh_vmpaacc_WhWubRb( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -2368,7 +2381,7 @@ pub unsafe fn q6_wh_vmpaacc_whwubrb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpabusv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpa_wubwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vmpa_WubWb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vmpabusv(vuu, vvv) } @@ -2380,7 +2393,7 @@ pub unsafe fn q6_wh_vmpa_wubwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpabuuv))] #[unstable(feature = "stdarch_hexagon", 
issue = "151523")] -pub unsafe fn q6_wh_vmpa_wubwub(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vmpa_WubWub(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vmpabuuv(vuu, vvv) } @@ -2392,7 +2405,7 @@ pub unsafe fn q6_wh_vmpa_wubwub(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVe #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpahb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpa_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vmpa_WhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vmpahb(vuu, rt) } @@ -2404,7 +2417,7 @@ pub unsafe fn q6_ww_vmpa_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpahb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpaacc_wwwhrb( +pub unsafe fn Q6_Ww_vmpaacc_WwWhRb( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -2420,7 +2433,7 @@ pub unsafe fn q6_ww_vmpaacc_wwwhrb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vmpy_VubRb(vu: HvxVector, rt: i32) -> HvxVectorPair { vmpybus(vu, rt) } @@ -2432,7 +2445,7 @@ pub unsafe fn q6_wh_vmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpyacc_whvubrb(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vmpyacc_WhVubRb(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair { vmpybus_acc(vxx, vu, rt) } @@ -2444,7 +2457,7 @@ pub 
unsafe fn q6_wh_vmpyacc_whvubrb(vxx: HvxVectorPair, vu: HvxVector, rt: i32) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybusv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vmpy_VubVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpybusv(vu, vv) } @@ -2456,7 +2469,7 @@ pub unsafe fn q6_wh_vmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybusv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpyacc_whvubvb( +pub unsafe fn Q6_Wh_vmpyacc_WhVubVb( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -2472,7 +2485,7 @@ pub unsafe fn q6_wh_vmpyacc_whvubvb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vmpy_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpybv(vu, vv) } @@ -2484,7 +2497,7 @@ pub unsafe fn q6_wh_vmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpyacc_whvbvb( +pub unsafe fn Q6_Wh_vmpyacc_WhVbVb( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -2500,7 +2513,7 @@ pub unsafe fn q6_wh_vmpyacc_whvbvb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyewuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmpye_VwVuh(vu: HvxVector, 
vv: HvxVector) -> HvxVector { vmpyewuh(vu, vv) } @@ -2512,7 +2525,7 @@ pub unsafe fn q6_vw_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpy_vhrh(vu: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vmpy_VhRh(vu: HvxVector, rt: i32) -> HvxVectorPair { vmpyh(vu, rt) } @@ -2524,7 +2537,7 @@ pub unsafe fn q6_ww_vmpy_vhrh(vu: HvxVector, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhsat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpyacc_wwvhrh_sat( +pub unsafe fn Q6_Ww_vmpyacc_WwVhRh_sat( vxx: HvxVectorPair, vu: HvxVector, rt: i32, @@ -2540,7 +2553,7 @@ pub unsafe fn q6_ww_vmpyacc_wwvhrh_sat( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhsrs))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmpy_vhrh_s1_rnd_sat(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vmpy_VhRh_s1_rnd_sat(vu: HvxVector, rt: i32) -> HvxVector { vmpyhsrs(vu, rt) } @@ -2552,7 +2565,7 @@ pub unsafe fn q6_vh_vmpy_vhrh_s1_rnd_sat(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhss))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmpy_vhrh_s1_sat(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vmpy_VhRh_s1_sat(vu: HvxVector, rt: i32) -> HvxVector { vmpyhss(vu, rt) } @@ -2564,7 +2577,7 @@ pub unsafe fn q6_vh_vmpy_vhrh_s1_sat(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhus))] #[unstable(feature = "stdarch_hexagon", issue 
= "151523")] -pub unsafe fn q6_ww_vmpy_vhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vmpy_VhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpyhus(vu, vv) } @@ -2576,7 +2589,7 @@ pub unsafe fn q6_ww_vmpy_vhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpyacc_wwvhvuh( +pub unsafe fn Q6_Ww_vmpyacc_WwVhVuh( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -2592,7 +2605,7 @@ pub unsafe fn q6_ww_vmpyacc_wwvhvuh( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpy_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vmpy_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpyhv(vu, vv) } @@ -2604,7 +2617,7 @@ pub unsafe fn q6_ww_vmpy_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpyacc_wwvhvh( +pub unsafe fn Q6_Ww_vmpyacc_WwVhVh( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -2620,7 +2633,7 @@ pub unsafe fn q6_ww_vmpyacc_wwvhvh( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhvsrs))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmpy_vhvh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vmpy_VhVh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyhvsrs(vu, vv) } @@ -2632,7 +2645,7 @@ pub unsafe fn q6_vh_vmpy_vhvh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = 
"hvxv60"))] #[cfg_attr(test, assert_instr(vmpyieoh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyieo_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyieo_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyieoh(vu, vv) } @@ -2644,7 +2657,7 @@ pub unsafe fn q6_vw_vmpyieo_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiewh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyieacc_vwvwvh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyieacc_VwVwVh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyiewh_acc(vx, vu, vv) } @@ -2656,7 +2669,7 @@ pub unsafe fn q6_vw_vmpyieacc_vwvwvh(vx: HvxVector, vu: HvxVector, vv: HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiewuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyie_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyie_VwVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyiewuh(vu, vv) } @@ -2668,7 +2681,7 @@ pub unsafe fn q6_vw_vmpyie_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiewuh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyieacc_vwvwvuh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyieacc_VwVwVuh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyiewuh_acc(vx, vu, vv) } @@ -2680,7 +2693,7 @@ pub unsafe fn q6_vw_vmpyieacc_vwvwvuh(vx: HvxVector, vu: HvxVector, vv: HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyih))] #[unstable(feature = 
"stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmpyi_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vmpyi_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyih(vu, vv) } @@ -2692,7 +2705,7 @@ pub unsafe fn q6_vh_vmpyi_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyih_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmpyiacc_vhvhvh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vmpyiacc_VhVhVh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyih_acc(vx, vu, vv) } @@ -2704,7 +2717,7 @@ pub unsafe fn q6_vh_vmpyiacc_vhvhvh(vx: HvxVector, vu: HvxVector, vv: HvxVector) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyihb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmpyi_vhrb(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vmpyi_VhRb(vu: HvxVector, rt: i32) -> HvxVector { vmpyihb(vu, rt) } @@ -2716,7 +2729,7 @@ pub unsafe fn q6_vh_vmpyi_vhrb(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyihb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmpyiacc_vhvhrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vmpyiacc_VhVhRb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vmpyihb_acc(vx, vu, rt) } @@ -2728,7 +2741,7 @@ pub unsafe fn q6_vh_vmpyiacc_vhvhrb(vx: HvxVector, vu: HvxVector, rt: i32) -> Hv #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiowh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyio_vwvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn 
Q6_Vw_vmpyio_VwVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyiowh(vu, vv) } @@ -2740,7 +2753,7 @@ pub unsafe fn q6_vw_vmpyio_vwvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiwb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyi_vwrb(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyi_VwRb(vu: HvxVector, rt: i32) -> HvxVector { vmpyiwb(vu, rt) } @@ -2752,7 +2765,7 @@ pub unsafe fn q6_vw_vmpyi_vwrb(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiwb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyiacc_vwvwrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyiacc_VwVwRb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vmpyiwb_acc(vx, vu, rt) } @@ -2764,7 +2777,7 @@ pub unsafe fn q6_vw_vmpyiacc_vwvwrb(vx: HvxVector, vu: HvxVector, rt: i32) -> Hv #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyi_vwrh(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyi_VwRh(vu: HvxVector, rt: i32) -> HvxVector { vmpyiwh(vu, rt) } @@ -2776,7 +2789,7 @@ pub unsafe fn q6_vw_vmpyi_vwrh(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiwh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyiacc_vwvwrh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyiacc_VwVwRh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vmpyiwh_acc(vx, vu, rt) } @@ -2788,7 +2801,7 @@ pub unsafe fn q6_vw_vmpyiacc_vwvwrh(vx: HvxVector, vu: HvxVector, rt: i32) -> 
Hv #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyowh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyo_vwvh_s1_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyo_VwVh_s1_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyowh(vu, vv) } @@ -2800,7 +2813,7 @@ pub unsafe fn q6_vw_vmpyo_vwvh_s1_sat(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyowh_rnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyo_vwvh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyo_VwVh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyowh_rnd(vu, vv) } @@ -2812,7 +2825,7 @@ pub unsafe fn q6_vw_vmpyo_vwvh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVe #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyowh_rnd_sacc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_rnd_sat_shift( +pub unsafe fn Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift( vx: HvxVector, vu: HvxVector, vv: HvxVector, @@ -2828,7 +2841,7 @@ pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_rnd_sat_shift( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyowh_sacc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_sat_shift( +pub unsafe fn Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift( vx: HvxVector, vu: HvxVector, vv: HvxVector, @@ -2844,7 +2857,7 @@ pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_sat_shift( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuh_vmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVectorPair { 
+pub unsafe fn Q6_Wuh_vmpy_VubRub(vu: HvxVector, rt: i32) -> HvxVectorPair { vmpyub(vu, rt) } @@ -2856,7 +2869,7 @@ pub unsafe fn q6_wuh_vmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyub_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuh_vmpyacc_wuhvubrub( +pub unsafe fn Q6_Wuh_vmpyacc_WuhVubRub( vxx: HvxVectorPair, vu: HvxVector, rt: i32, @@ -2872,7 +2885,7 @@ pub unsafe fn q6_wuh_vmpyacc_wuhvubrub( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyubv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuh_vmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wuh_vmpy_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpyubv(vu, vv) } @@ -2884,7 +2897,7 @@ pub unsafe fn q6_wuh_vmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyubv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuh_vmpyacc_wuhvubvub( +pub unsafe fn Q6_Wuh_vmpyacc_WuhVubVub( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -2900,7 +2913,7 @@ pub unsafe fn q6_wuh_vmpyacc_wuhvubvub( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vmpy_vuhruh(vu: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vmpy_VuhRuh(vu: HvxVector, rt: i32) -> HvxVectorPair { vmpyuh(vu, rt) } @@ -2912,7 +2925,7 @@ pub unsafe fn q6_wuw_vmpy_vuhruh(vu: HvxVector, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyuh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] 
-pub unsafe fn q6_wuw_vmpyacc_wuwvuhruh( +pub unsafe fn Q6_Wuw_vmpyacc_WuwVuhRuh( vxx: HvxVectorPair, vu: HvxVector, rt: i32, @@ -2928,7 +2941,7 @@ pub unsafe fn q6_wuw_vmpyacc_wuwvuhruh( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyuhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vmpy_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vmpy_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpyuhv(vu, vv) } @@ -2940,7 +2953,7 @@ pub unsafe fn q6_wuw_vmpy_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyuhv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vmpyacc_wuwvuhvuh( +pub unsafe fn Q6_Wuw_vmpyacc_WuwVuhVuh( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -2956,7 +2969,7 @@ pub unsafe fn q6_wuw_vmpyacc_wuwvuhvuh( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnavgh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vnavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vnavg_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vnavgh(vu, vv) } @@ -2968,7 +2981,7 @@ pub unsafe fn q6_vh_vnavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnavgub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vnavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vnavg_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { vnavgub(vu, vv) } @@ -2980,7 +2993,7 @@ pub unsafe fn q6_vb_vnavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, 
assert_instr(vnavgw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vnavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vnavg_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vnavgw(vu, vv) } @@ -2992,7 +3005,7 @@ pub unsafe fn q6_vw_vnavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnormamth))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vnormamt_vh(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vnormamt_Vh(vu: HvxVector) -> HvxVector { vnormamth(vu) } @@ -3004,7 +3017,7 @@ pub unsafe fn q6_vh_vnormamt_vh(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnormamtw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vnormamt_vw(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vnormamt_Vw(vu: HvxVector) -> HvxVector { vnormamtw(vu) } @@ -3016,7 +3029,7 @@ pub unsafe fn q6_vw_vnormamt_vw(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnot))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vnot_v(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vnot_V(vu: HvxVector) -> HvxVector { vnot(vu) } @@ -3028,7 +3041,7 @@ pub unsafe fn q6_v_vnot_v(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vor))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vor_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vor_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { simd_or(vu, vv) } @@ -3040,7 +3053,7 @@ pub unsafe fn q6_v_vor_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackeb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vpacke_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vpacke_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vpackeb(vu, vv) } @@ -3052,7 +3065,7 @@ pub unsafe fn q6_vb_vpacke_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackeh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vpacke_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vpacke_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vpackeh(vu, vv) } @@ -3064,7 +3077,7 @@ pub unsafe fn q6_vh_vpacke_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackhb_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vpack_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vpackhb_sat(vu, vv) } @@ -3076,7 +3089,7 @@ pub unsafe fn q6_vb_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackhub_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vpack_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vpackhub_sat(vu, vv) } @@ -3088,7 +3101,7 @@ pub unsafe fn q6_vub_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackob))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vpacko_vhvh(vu: 
HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vpacko_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vpackob(vu, vv) } @@ -3100,7 +3113,7 @@ pub unsafe fn q6_vb_vpacko_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackoh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vpacko_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vpacko_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vpackoh(vu, vv) } @@ -3112,7 +3125,7 @@ pub unsafe fn q6_vh_vpacko_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackwh_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vpack_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vpackwh_sat(vu, vv) } @@ -3124,7 +3137,7 @@ pub unsafe fn q6_vh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackwuh_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vpack_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vpackwuh_sat(vu, vv) } @@ -3136,7 +3149,7 @@ pub unsafe fn q6_vuh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpopcounth))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vpopcount_vh(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vpopcount_Vh(vu: HvxVector) -> HvxVector { vpopcounth(vu) } @@ -3148,7 +3161,7 @@ pub unsafe fn q6_vh_vpopcount_vh(vu: HvxVector) 
-> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrdelta))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vrdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vrdelta_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { vrdelta(vu, vv) } @@ -3160,7 +3173,7 @@ pub unsafe fn q6_v_vrdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vrmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vrmpy_VubRb(vu: HvxVector, rt: i32) -> HvxVector { vrmpybus(vu, rt) } @@ -3172,7 +3185,7 @@ pub unsafe fn q6_vw_vrmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vrmpyacc_vwvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vrmpyacc_VwVubRb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vrmpybus_acc(vx, vu, rt) } @@ -3184,7 +3197,7 @@ pub unsafe fn q6_vw_vrmpyacc_vwvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> H #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybusi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vrmpy_wubrbi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vrmpy_WubRbI(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { vrmpybusi(vuu, rt, iu1) } @@ -3196,7 +3209,7 @@ pub unsafe fn q6_ww_vrmpy_wubrbi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVe #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybusi_acc))] #[unstable(feature 
= "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vrmpyacc_wwwubrbi( +pub unsafe fn Q6_Ww_vrmpyacc_WwWubRbI( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -3213,7 +3226,7 @@ pub unsafe fn q6_ww_vrmpyacc_wwwubrbi( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybusv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vrmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vrmpy_VubVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vrmpybusv(vu, vv) } @@ -3225,7 +3238,7 @@ pub unsafe fn q6_vw_vrmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybusv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vrmpyacc_vwvubvb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vrmpyacc_VwVubVb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { vrmpybusv_acc(vx, vu, vv) } @@ -3237,7 +3250,7 @@ pub unsafe fn q6_vw_vrmpyacc_vwvubvb(vx: HvxVector, vu: HvxVector, vv: HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vrmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vrmpy_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vrmpybv(vu, vv) } @@ -3249,7 +3262,7 @@ pub unsafe fn q6_vw_vrmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vrmpyacc_vwvbvb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vrmpyacc_VwVbVb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { 
vrmpybv_acc(vx, vu, vv) } @@ -3261,7 +3274,7 @@ pub unsafe fn q6_vw_vrmpyacc_vwvbvb(vx: HvxVector, vu: HvxVector, vv: HvxVector) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vrmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuw_vrmpy_VubRub(vu: HvxVector, rt: i32) -> HvxVector { vrmpyub(vu, rt) } @@ -3273,7 +3286,7 @@ pub unsafe fn q6_vuw_vrmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyub_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vrmpyacc_vuwvubrub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuw_vrmpyacc_VuwVubRub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vrmpyub_acc(vx, vu, rt) } @@ -3285,7 +3298,7 @@ pub unsafe fn q6_vuw_vrmpyacc_vuwvubrub(vx: HvxVector, vu: HvxVector, rt: i32) - #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyubi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vrmpy_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vrmpy_WubRubI(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { vrmpyubi(vuu, rt, iu1) } @@ -3297,7 +3310,7 @@ pub unsafe fn q6_wuw_vrmpy_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyubi_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vrmpyacc_wuwwubrubi( +pub unsafe fn Q6_Wuw_vrmpyacc_WuwWubRubI( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -3314,7 +3327,7 @@ pub unsafe fn q6_wuw_vrmpyacc_wuwwubrubi( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] 
#[cfg_attr(test, assert_instr(vrmpyubv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vrmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuw_vrmpy_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { vrmpyubv(vu, vv) } @@ -3326,7 +3339,7 @@ pub unsafe fn q6_vuw_vrmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyubv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vrmpyacc_vuwvubvub(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuw_vrmpyacc_VuwVubVub(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { vrmpyubv_acc(vx, vu, vv) } @@ -3338,7 +3351,7 @@ pub unsafe fn q6_vuw_vrmpyacc_vuwvubvub(vx: HvxVector, vu: HvxVector, vv: HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vror))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vror_vr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vror_VR(vu: HvxVector, rt: i32) -> HvxVector { vror(vu, rt) } @@ -3350,7 +3363,7 @@ pub unsafe fn q6_v_vror_vr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vroundhb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vround_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vround_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vroundhb(vu, vv) } @@ -3362,7 +3375,7 @@ pub unsafe fn q6_vb_vround_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vroundhub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vround_vhvh_sat(vu: HvxVector, vv: HvxVector) -> 
HvxVector { +pub unsafe fn Q6_Vub_vround_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vroundhub(vu, vv) } @@ -3374,7 +3387,7 @@ pub unsafe fn q6_vub_vround_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vroundwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vround_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vroundwh(vu, vv) } @@ -3386,7 +3399,7 @@ pub unsafe fn q6_vh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vroundwuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vround_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vroundwuh(vu, vv) } @@ -3398,7 +3411,7 @@ pub unsafe fn q6_vuh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrsadubi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vrsad_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vrsad_WubRubI(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { vrsadubi(vuu, rt, iu1) } @@ -3410,7 +3423,7 @@ pub unsafe fn q6_wuw_vrsad_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrsadubi_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vrsadacc_wuwwubrubi( +pub unsafe fn Q6_Wuw_vrsadacc_WuwWubRubI( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -3427,7 +3440,7 @@ pub unsafe fn 
q6_wuw_vrsadacc_wuwwubrubi( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsathub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vsat_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vsat_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vsathub(vu, vv) } @@ -3439,7 +3452,7 @@ pub unsafe fn q6_vub_vsat_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsatwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vsat_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vsat_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vsatwh(vu, vv) } @@ -3451,7 +3464,7 @@ pub unsafe fn q6_vh_vsat_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vsxt_vb(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vsxt_Vb(vu: HvxVector) -> HvxVectorPair { vsb(vu) } @@ -3463,7 +3476,7 @@ pub unsafe fn q6_wh_vsxt_vb(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vsxt_vh(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vsxt_Vh(vu: HvxVector) -> HvxVectorPair { vsh(vu) } @@ -3475,7 +3488,7 @@ pub unsafe fn q6_ww_vsxt_vh(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshufeh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vshuffe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vshuffe_VhVh(vu: HvxVector, vv: HvxVector) 
-> HvxVector { vshufeh(vu, vv) } @@ -3487,7 +3500,7 @@ pub unsafe fn q6_vh_vshuffe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vshuff_vb(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vshuff_Vb(vu: HvxVector) -> HvxVector { vshuffb(vu) } @@ -3499,7 +3512,7 @@ pub unsafe fn q6_vb_vshuff_vb(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffeb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vshuffe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vshuffe_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vshuffeb(vu, vv) } @@ -3511,7 +3524,7 @@ pub unsafe fn q6_vb_vshuffe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vshuff_vh(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vshuff_Vh(vu: HvxVector) -> HvxVector { vshuffh(vu) } @@ -3523,7 +3536,7 @@ pub unsafe fn q6_vh_vshuff_vh(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffob))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vshuffo_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vshuffo_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vshuffob(vu, vv) } @@ -3535,7 +3548,7 @@ pub unsafe fn q6_vb_vshuffo_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffvdd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe 
fn q6_w_vshuff_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_W_vshuff_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { vshuffvdd(vu, vv, rt) } @@ -3547,7 +3560,7 @@ pub unsafe fn q6_w_vshuff_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshufoeb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wb_vshuffoe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wb_vshuffoe_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vshufoeb(vu, vv) } @@ -3559,7 +3572,7 @@ pub unsafe fn q6_wb_vshuffoe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshufoeh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vshuffoe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vshuffoe_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vshufoeh(vu, vv) } @@ -3571,7 +3584,7 @@ pub unsafe fn q6_wh_vshuffoe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshufoh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vshuffo_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vshuffo_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vshufoh(vu, vv) } @@ -3583,7 +3596,7 @@ pub unsafe fn q6_vh_vshuffo_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vsub_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vsub_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vsubb(vu, 
vv) } @@ -3595,7 +3608,7 @@ pub unsafe fn q6_vb_vsub_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubb_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wb_vsub_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wb_vsub_WbWb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vsubb_dv(vuu, vvv) } @@ -3607,7 +3620,7 @@ pub unsafe fn q6_wb_vsub_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vsub_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vsubh(vu, vv) } @@ -3619,7 +3632,7 @@ pub unsafe fn q6_vh_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubh_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vsub_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vsub_WhWh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vsubh_dv(vuu, vvv) } @@ -3631,7 +3644,7 @@ pub unsafe fn q6_wh_vsub_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vsub_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vsub_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vsubhsat(vu, vv) } @@ -3643,7 +3656,7 @@ pub unsafe fn q6_vh_vsub_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubhsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vsub_whwh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vsub_WhWh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vsubhsat_dv(vuu, vvv) } @@ -3655,7 +3668,7 @@ pub unsafe fn q6_wh_vsub_whwh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vsub_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vsubhw(vu, vv) } @@ -3667,7 +3680,7 @@ pub unsafe fn q6_ww_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsububh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vsub_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vsub_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vsububh(vu, vv) } @@ -3679,7 +3692,7 @@ pub unsafe fn q6_wh_vsub_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsububsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vsub_vubvub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vsub_VubVub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vsububsat(vu, vv) } @@ -3691,7 +3704,7 @@ pub unsafe fn q6_vub_vsub_vubvub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsububsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] 
-pub unsafe fn q6_wub_vsub_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wub_vsub_WubWub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vsububsat_dv(vuu, vvv) } @@ -3703,7 +3716,7 @@ pub unsafe fn q6_wub_vsub_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubuhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vsub_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vsub_VuhVuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vsubuhsat(vu, vv) } @@ -3715,7 +3728,7 @@ pub unsafe fn q6_vuh_vsub_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubuhsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuh_vsub_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wuh_vsub_WuhWuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vsubuhsat_dv(vuu, vvv) } @@ -3727,7 +3740,7 @@ pub unsafe fn q6_wuh_vsub_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubuhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vsub_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vsub_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vsubuhw(vu, vv) } @@ -3739,7 +3752,7 @@ pub unsafe fn q6_ww_vsub_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vsub_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn 
Q6_Vw_vsub_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { simd_sub(vu, vv) } @@ -3751,7 +3764,7 @@ pub unsafe fn q6_vw_vsub_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubw_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vsub_wwww(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vsub_WwWw(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vsubw_dv(vuu, vvv) } @@ -3763,7 +3776,7 @@ pub unsafe fn q6_ww_vsub_wwww(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubwsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vsub_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vsub_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vsubwsat(vu, vv) } @@ -3775,7 +3788,7 @@ pub unsafe fn q6_vw_vsub_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubwsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vsub_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vsub_WwWw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vsubwsat_dv(vuu, vvv) } @@ -3787,7 +3800,7 @@ pub unsafe fn q6_ww_vsub_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpyb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vtmpy_wbrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vtmpy_WbRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vtmpyb(vuu, rt) } @@ -3799,7 +3812,7 @@ pub unsafe fn 
q6_wh_vtmpy_wbrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpyb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vtmpyacc_whwbrb( +pub unsafe fn Q6_Wh_vtmpyacc_WhWbRb( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -3815,7 +3828,7 @@ pub unsafe fn q6_wh_vtmpyacc_whwbrb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpybus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vtmpy_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vtmpy_WubRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vtmpybus(vuu, rt) } @@ -3827,7 +3840,7 @@ pub unsafe fn q6_wh_vtmpy_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpybus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vtmpyacc_whwubrb( +pub unsafe fn Q6_Wh_vtmpyacc_WhWubRb( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -3843,7 +3856,7 @@ pub unsafe fn q6_wh_vtmpyacc_whwubrb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpyhb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vtmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vtmpy_WhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vtmpyhb(vuu, rt) } @@ -3855,7 +3868,7 @@ pub unsafe fn q6_ww_vtmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpyhb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vtmpyacc_wwwhrb( +pub unsafe fn Q6_Ww_vtmpyacc_WwWhRb( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, 
@@ -3871,7 +3884,7 @@ pub unsafe fn q6_ww_vtmpyacc_wwwhrb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vunpack_vb(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vunpack_Vb(vu: HvxVector) -> HvxVectorPair { vunpackb(vu) } @@ -3883,7 +3896,7 @@ pub unsafe fn q6_wh_vunpack_vb(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vunpack_vh(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vunpack_Vh(vu: HvxVector) -> HvxVectorPair { vunpackh(vu) } @@ -3895,7 +3908,7 @@ pub unsafe fn q6_ww_vunpack_vh(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackob))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vunpackoor_whvb(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vunpackoor_WhVb(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair { vunpackob(vxx, vu) } @@ -3907,7 +3920,7 @@ pub unsafe fn q6_wh_vunpackoor_whvb(vxx: HvxVectorPair, vu: HvxVector) -> HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackoh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vunpackoor_wwvh(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vunpackoor_WwVh(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair { vunpackoh(vxx, vu) } @@ -3919,7 +3932,7 @@ pub unsafe fn q6_ww_vunpackoor_wwvh(vxx: HvxVectorPair, vu: HvxVector) -> HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackub))] #[unstable(feature = "stdarch_hexagon", issue = 
"151523")] -pub unsafe fn q6_wuh_vunpack_vub(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wuh_vunpack_Vub(vu: HvxVector) -> HvxVectorPair { vunpackub(vu) } @@ -3931,7 +3944,7 @@ pub unsafe fn q6_wuh_vunpack_vub(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vunpack_vuh(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vunpack_Vuh(vu: HvxVector) -> HvxVectorPair { vunpackuh(vu) } @@ -3943,7 +3956,7 @@ pub unsafe fn q6_wuw_vunpack_vuh(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vxor))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vxor_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vxor_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { simd_xor(vu, vv) } @@ -3955,7 +3968,7 @@ pub unsafe fn q6_v_vxor_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vzb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuh_vzxt_vub(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wuh_vzxt_Vub(vu: HvxVector) -> HvxVectorPair { vzb(vu) } @@ -3967,7 +3980,7 @@ pub unsafe fn q6_wuh_vzxt_vub(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vzh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vzxt_vuh(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vzxt_Vuh(vu: HvxVector) -> HvxVectorPair { vzh(vu) } @@ -3979,7 +3992,7 @@ pub unsafe fn q6_wuw_vzxt_vuh(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(lvsplatb))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vsplat_r(rt: i32) -> HvxVector { +pub unsafe fn Q6_Vb_vsplat_R(rt: i32) -> HvxVector { lvsplatb(rt) } @@ -3991,7 +4004,7 @@ pub unsafe fn q6_vb_vsplat_r(rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(lvsplath))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vsplat_r(rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vsplat_R(rt: i32) -> HvxVector { lvsplath(rt) } @@ -4003,7 +4016,7 @@ pub unsafe fn q6_vh_vsplat_r(rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddbsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vadd_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vadd_VbVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vaddbsat(vu, vv) } @@ -4015,7 +4028,7 @@ pub unsafe fn q6_vb_vadd_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddbsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wb_vadd_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wb_vadd_WbWb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vaddbsat_dv(vuu, vvv) } @@ -4027,7 +4040,7 @@ pub unsafe fn q6_wb_vadd_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddclbh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vadd_vclb_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vadd_vclb_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vaddclbh(vu, vv) } @@ -4039,7 +4052,7 @@ pub unsafe fn q6_vh_vadd_vclb_vhvh(vu: HvxVector, 
vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddclbw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vadd_vclb_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vadd_vclb_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vaddclbw(vu, vv) } @@ -4051,7 +4064,7 @@ pub unsafe fn q6_vw_vadd_vclb_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddhw_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vaddacc_wwvhvh( +pub unsafe fn Q6_Ww_vaddacc_WwVhVh( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -4067,7 +4080,7 @@ pub unsafe fn q6_ww_vaddacc_wwvhvh( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddubh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vaddacc_whvubvub( +pub unsafe fn Q6_Wh_vaddacc_WhVubVub( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -4083,7 +4096,7 @@ pub unsafe fn q6_wh_vaddacc_whvubvub( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddububb_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vadd_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vadd_VubVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vaddububb_sat(vu, vv) } @@ -4095,7 +4108,7 @@ pub unsafe fn q6_vub_vadd_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vadduhw_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vaddacc_wwvuhvuh( +pub unsafe fn Q6_Ww_vaddacc_WwVuhVuh( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -4111,7 +4124,7 
@@ pub unsafe fn q6_ww_vaddacc_wwvuhvuh( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vadduwsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vadd_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuw_vadd_VuwVuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vadduwsat(vu, vv) } @@ -4123,7 +4136,7 @@ pub unsafe fn q6_vuw_vadd_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vadduwsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vadd_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vadd_WuwWuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vadduwsat_dv(vuu, vvv) } @@ -4135,7 +4148,7 @@ pub unsafe fn q6_wuw_vadd_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vasrhbsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vb_vasr_VhVhR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrhbsat(vu, vv, rt) } @@ -4147,7 +4160,7 @@ pub unsafe fn q6_vb_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vasruwuhrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vasr_vuwvuwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuh_vasr_VuwVuwR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasruwuhrndsat(vu, vv, rt) } @@ -4159,7 +4172,7 @@ pub unsafe fn q6_vuh_vasr_vuwvuwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vasrwuhrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuh_vasr_VwVwR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrwuhrndsat(vu, vv, rt) } @@ -4171,7 +4184,7 @@ pub unsafe fn q6_vuh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) - #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlsrb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vlsr_vubr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vub_vlsr_VubR(vu: HvxVector, rt: i32) -> HvxVector { vlsrb(vu, rt) } @@ -4183,7 +4196,7 @@ pub unsafe fn q6_vub_vlsr_vubr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvvb_nm))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vlut32_vbvbr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vb_vlut32_VbVbR_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vlutvvb_nm(vu, vv, rt) } @@ -4195,7 +4208,7 @@ pub unsafe fn q6_vb_vlut32_vbvbr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvvb_oracci))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vlut32or_vbvbvbi( +pub unsafe fn Q6_Vb_vlut32or_VbVbVbI( vx: HvxVector, vu: HvxVector, vv: HvxVector, @@ -4212,7 +4225,7 @@ pub unsafe fn q6_vb_vlut32or_vbvbvbi( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvvbi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vlut32_vbvbi(vu: HvxVector, 
vv: HvxVector, iu3: i32) -> HvxVector { +pub unsafe fn Q6_Vb_vlut32_VbVbI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { vlutvvbi(vu, vv, iu3) } @@ -4224,7 +4237,7 @@ pub unsafe fn q6_vb_vlut32_vbvbi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxV #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvwh_nm))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vlut16_vbvhr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vlut16_VbVhR_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { vlutvwh_nm(vu, vv, rt) } @@ -4236,7 +4249,7 @@ pub unsafe fn q6_wh_vlut16_vbvhr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvwh_oracci))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vlut16or_whvbvhi( +pub unsafe fn Q6_Wh_vlut16or_WhVbVhI( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -4253,7 +4266,7 @@ pub unsafe fn q6_wh_vlut16or_whvbvhi( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvwhi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vlut16_vbvhi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vlut16_VbVhI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVectorPair { vlutvwhi(vu, vv, iu3) } @@ -4265,7 +4278,7 @@ pub unsafe fn q6_wh_vlut16_vbvhi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxV #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmaxb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vmax_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vmax_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vmaxb(vu, vv) } @@ -4277,7 +4290,7 @@ pub unsafe fn 
q6_vb_vmax_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vminb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vmin_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vmin_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vminb(vu, vv) } @@ -4289,7 +4302,7 @@ pub unsafe fn q6_vb_vmin_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpauhb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpa_wuhrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vmpa_WuhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vmpauhb(vuu, rt) } @@ -4301,7 +4314,7 @@ pub unsafe fn q6_ww_vmpa_wuhrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpauhb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpaacc_wwwuhrb( +pub unsafe fn Q6_Ww_vmpaacc_WwWuhRb( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -4317,7 +4330,7 @@ pub unsafe fn q6_ww_vmpaacc_wwwuhrb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpyewuh_64))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_w_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_W_vmpye_VwVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpyewuh_64(vu, vv) } @@ -4329,7 +4342,7 @@ pub unsafe fn q6_w_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpyiwub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyi_vwrub(vu: HvxVector, rt: i32) 
-> HvxVector { +pub unsafe fn Q6_Vw_vmpyi_VwRub(vu: HvxVector, rt: i32) -> HvxVector { vmpyiwub(vu, rt) } @@ -4341,7 +4354,7 @@ pub unsafe fn q6_vw_vmpyi_vwrub(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpyiwub_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyiacc_vwvwrub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyiacc_VwVwRub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vmpyiwub_acc(vx, vu, rt) } @@ -4353,7 +4366,7 @@ pub unsafe fn q6_vw_vmpyiacc_vwvwrub(vx: HvxVector, vu: HvxVector, rt: i32) -> H #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpyowh_64_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_w_vmpyoacc_wvwvh( +pub unsafe fn Q6_W_vmpyoacc_WVwVh( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -4369,7 +4382,7 @@ pub unsafe fn q6_w_vmpyoacc_wvwvh( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vrounduhub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vround_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vround_VuhVuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vrounduhub(vu, vv) } @@ -4381,7 +4394,7 @@ pub unsafe fn q6_vub_vround_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vrounduwuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vround_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vround_VuwVuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vrounduwuh(vu, vv) } @@ -4393,7 +4406,7 @@ pub unsafe fn q6_vuh_vround_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVecto 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsatuwuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vsat_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vsat_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVector { vsatuwuh(vu, vv) } @@ -4405,7 +4418,7 @@ pub unsafe fn q6_vuh_vsat_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubbsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vsub_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vsub_VbVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vsubbsat(vu, vv) } @@ -4417,7 +4430,7 @@ pub unsafe fn q6_vb_vsub_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubbsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wb_vsub_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wb_vsub_WbWb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vsubbsat_dv(vuu, vvv) } @@ -4429,7 +4442,7 @@ pub unsafe fn q6_wb_vsub_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubububb_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vsub_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vsub_VubVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vsubububb_sat(vu, vv) } @@ -4441,7 +4454,7 @@ pub unsafe fn q6_vub_vsub_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubuwsat))] #[unstable(feature = 
"stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vsub_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuw_vsub_VuwVuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vsubuwsat(vu, vv) } @@ -4453,7 +4466,7 @@ pub unsafe fn q6_vuw_vsub_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubuwsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vsub_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vsub_WuwWuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vsubuwsat_dv(vuu, vvv) } @@ -4465,7 +4478,7 @@ pub unsafe fn q6_wuw_vsub_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vabsb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vabs_vb(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vabs_Vb(vu: HvxVector) -> HvxVector { vabsb(vu) } @@ -4477,7 +4490,7 @@ pub unsafe fn q6_vb_vabs_vb(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vabsb_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vabs_vb_sat(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vabs_Vb_sat(vu: HvxVector) -> HvxVector { vabsb_sat(vu) } @@ -4489,7 +4502,7 @@ pub unsafe fn q6_vb_vabs_vb_sat(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vaslh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vaslacc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vaslacc_VhVhR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vaslh_acc(vx, vu, rt) } @@ -4501,7 +4514,7 
@@ pub unsafe fn q6_vh_vaslacc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxV #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vasrh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vasracc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vasracc_VhVhR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vasrh_acc(vx, vu, rt) } @@ -4513,7 +4526,7 @@ pub unsafe fn q6_vh_vasracc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxV #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vasruhubrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vasr_vuhvuhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vub_vasr_VuhVuhR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasruhubrndsat(vu, vv, rt) } @@ -4525,7 +4538,7 @@ pub unsafe fn q6_vub_vasr_vuhvuhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vasruhubsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vasr_vuhvuhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vub_vasr_VuhVuhR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasruhubsat(vu, vv, rt) } @@ -4537,7 +4550,7 @@ pub unsafe fn q6_vub_vasr_vuhvuhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vasruwuhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vasr_vuwvuwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuh_vasr_VuwVuwR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasruwuhsat(vu, vv, rt) } @@ -4549,7 +4562,7 @@ pub unsafe fn 
q6_vuh_vasr_vuwvuwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vavgb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vavg_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vavgb(vu, vv) } @@ -4561,7 +4574,7 @@ pub unsafe fn q6_vb_vavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vavgbrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vavg_vbvb_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vavg_VbVb_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { vavgbrnd(vu, vv) } @@ -4573,7 +4586,7 @@ pub unsafe fn q6_vb_vavg_vbvb_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vavguw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vavg_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuw_vavg_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVector { vavguw(vu, vv) } @@ -4585,7 +4598,7 @@ pub unsafe fn q6_vuw_vavg_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vavguwrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vavg_vuwvuw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuw_vavg_VuwVuw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { vavguwrnd(vu, vv) } @@ -4597,7 +4610,7 @@ pub unsafe fn q6_vuw_vavg_vuwvuw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vdd0))] #[unstable(feature = 
"stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_w_vzero() -> HvxVectorPair { +pub unsafe fn Q6_W_vzero() -> HvxVectorPair { vdd0() } @@ -4609,7 +4622,7 @@ pub unsafe fn q6_w_vzero() -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vgathermh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vgather_armvh(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) { +pub unsafe fn Q6_vgather_ARMVh(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) { vgathermh(rs, rt, mu, vv) } @@ -4621,7 +4634,7 @@ pub unsafe fn q6_vgather_armvh(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vgathermhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vgather_armww(rs: *mut HvxVector, rt: i32, mu: i32, vvv: HvxVectorPair) { +pub unsafe fn Q6_vgather_ARMWw(rs: *mut HvxVector, rt: i32, mu: i32, vvv: HvxVectorPair) { vgathermhw(rs, rt, mu, vvv) } @@ -4633,7 +4646,7 @@ pub unsafe fn q6_vgather_armww(rs: *mut HvxVector, rt: i32, mu: i32, vvv: HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vgathermw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vgather_armvw(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) { +pub unsafe fn Q6_vgather_ARMVw(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) { vgathermw(rs, rt, mu, vv) } @@ -4645,7 +4658,7 @@ pub unsafe fn q6_vgather_armvw(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpabuu))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpa_wubrub(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vmpa_WubRub(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { 
vmpabuu(vuu, rt) } @@ -4657,7 +4670,7 @@ pub unsafe fn q6_wh_vmpa_wubrub(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpabuu_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpaacc_whwubrub( +pub unsafe fn Q6_Wh_vmpaacc_WhWubRub( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -4673,7 +4686,7 @@ pub unsafe fn q6_wh_vmpaacc_whwubrub( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpyh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpyacc_wwvhrh(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vmpyacc_WwVhRh(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair { vmpyh_acc(vxx, vu, rt) } @@ -4685,7 +4698,7 @@ pub unsafe fn q6_ww_vmpyacc_wwvhrh(vxx: HvxVectorPair, vu: HvxVector, rt: i32) - #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpyuhe))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vmpye_vuhruh(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuw_vmpye_VuhRuh(vu: HvxVector, rt: i32) -> HvxVector { vmpyuhe(vu, rt) } @@ -4697,7 +4710,7 @@ pub unsafe fn q6_vuw_vmpye_vuhruh(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpyuhe_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vmpyeacc_vuwvuhruh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuw_vmpyeacc_VuwVuhRuh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vmpyuhe_acc(vx, vu, rt) } @@ -4709,7 +4722,7 @@ pub unsafe fn q6_vuw_vmpyeacc_vuwvuhruh(vx: HvxVector, vu: HvxVector, rt: i32) - #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] 
#[cfg_attr(test, assert_instr(vnavgb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vnavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vnavg_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vnavgb(vu, vv) } @@ -4721,7 +4734,7 @@ pub unsafe fn q6_vb_vnavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vscatter_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { +pub unsafe fn Q6_vscatter_RMVhV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { vscattermh(rt, mu, vv, vw) } @@ -4733,7 +4746,7 @@ pub unsafe fn q6_vscatter_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermh_add))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vscatteracc_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { +pub unsafe fn Q6_vscatteracc_RMVhV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { vscattermh_add(rt, mu, vv, vw) } @@ -4745,7 +4758,7 @@ pub unsafe fn q6_vscatteracc_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vscatter_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) { +pub unsafe fn Q6_vscatter_RMWwV(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) { vscattermhw(rt, mu, vvv, vw) } @@ -4757,7 +4770,7 @@ pub unsafe fn q6_vscatter_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermhw_add))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub 
unsafe fn q6_vscatteracc_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) { +pub unsafe fn Q6_vscatteracc_RMWwV(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) { vscattermhw_add(rt, mu, vvv, vw) } @@ -4769,7 +4782,7 @@ pub unsafe fn q6_vscatteracc_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vscatter_rmvwv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { +pub unsafe fn Q6_vscatter_RMVwV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { vscattermw(rt, mu, vv, vw) } @@ -4781,7 +4794,7 @@ pub unsafe fn q6_vscatter_rmvwv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermw_add))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vscatteracc_rmvwv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { +pub unsafe fn Q6_vscatteracc_RMVwV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { vscattermw_add(rt, mu, vv, vw) } @@ -4793,7 +4806,7 @@ pub unsafe fn q6_vscatteracc_rmvwv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] #[cfg_attr(test, assert_instr(vasr_into))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vasrinto_wwvwvw( +pub unsafe fn Q6_Ww_vasrinto_WwVwVw( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -4809,7 +4822,7 @@ pub unsafe fn q6_ww_vasrinto_wwvwvw( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] #[cfg_attr(test, assert_instr(vrotr))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vrotr_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuw_vrotr_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVector { vrotr(vu, vv) } @@ -4821,7 +4834,7 
@@ pub unsafe fn q6_vuw_vrotr_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] #[cfg_attr(test, assert_instr(vsatdw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vsatdw_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vsatdw_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vsatdw(vu, vv) } @@ -4833,7 +4846,7 @@ pub unsafe fn q6_vw_vsatdw_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(v6mpyhubs10))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_v6mpy_wubwbi_h( +pub unsafe fn Q6_Ww_v6mpy_WubWbI_h( vuu: HvxVectorPair, vvv: HvxVectorPair, iu2: i32, @@ -4849,7 +4862,7 @@ pub unsafe fn q6_ww_v6mpy_wubwbi_h( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(v6mpyhubs10_vxx))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_v6mpyacc_wwwubwbi_h( +pub unsafe fn Q6_Ww_v6mpyacc_WwWubWbI_h( vxx: HvxVectorPair, vuu: HvxVectorPair, vvv: HvxVectorPair, @@ -4866,7 +4879,7 @@ pub unsafe fn q6_ww_v6mpyacc_wwwubwbi_h( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(v6mpyvubs10))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_v6mpy_wubwbi_v( +pub unsafe fn Q6_Ww_v6mpy_WubWbI_v( vuu: HvxVectorPair, vvv: HvxVectorPair, iu2: i32, @@ -4882,7 +4895,7 @@ pub unsafe fn q6_ww_v6mpy_wubwbi_v( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(v6mpyvubs10_vxx))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_v6mpyacc_wwwubwbi_v( +pub unsafe fn Q6_Ww_v6mpyacc_WwWubWbI_v( vxx: HvxVectorPair, vuu: HvxVectorPair, vvv: HvxVectorPair, @@ -4899,7 +4912,7 @@ pub unsafe fn 
q6_ww_v6mpyacc_wwwubwbi_v( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vabs_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vabs_vhf(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vabs_Vhf(vu: HvxVector) -> HvxVector { vabs_hf(vu) } @@ -4911,7 +4924,7 @@ pub unsafe fn q6_vhf_vabs_vhf(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vabs_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vabs_vsf(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vabs_Vsf(vu: HvxVector) -> HvxVector { vabs_sf(vu) } @@ -4923,7 +4936,7 @@ pub unsafe fn q6_vsf_vabs_vsf(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf16_vadd_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vadd_hf(vu, vv) } @@ -4935,7 +4948,7 @@ pub unsafe fn q6_vqf16_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_hf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vadd_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vadd_hf_hf(vu, vv) } @@ -4947,7 +4960,7 @@ pub unsafe fn q6_vhf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vadd_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn 
Q6_Vqf16_vadd_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { vadd_qf16(vu, vv) } @@ -4959,7 +4972,7 @@ pub unsafe fn q6_vqf16_vadd_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_qf16_mix))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vadd_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf16_vadd_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vadd_qf16_mix(vu, vv) } @@ -4971,7 +4984,7 @@ pub unsafe fn q6_vqf16_vadd_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf32_vadd_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf32_vadd_Vqf32Vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { vadd_qf32(vu, vv) } @@ -4983,7 +4996,7 @@ pub unsafe fn q6_vqf32_vadd_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_qf32_mix))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf32_vadd_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf32_vadd_Vqf32Vsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vadd_qf32_mix(vu, vv) } @@ -4995,7 +5008,7 @@ pub unsafe fn q6_vqf32_vadd_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf32_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf32_vadd_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vadd_sf(vu, vv) } @@ -5007,7 +5020,7 @@ pub unsafe fn 
q6_vqf32_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wsf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wsf_vadd_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vadd_sf_hf(vu, vv) } @@ -5019,7 +5032,7 @@ pub unsafe fn q6_wsf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_sf_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vadd_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vadd_sf_sf(vu, vv) } @@ -5031,7 +5044,7 @@ pub unsafe fn q6_vsf_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vassign_fp))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vfmv_vw(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vfmv_Vw(vu: HvxVector) -> HvxVector { vassign_fp(vu) } @@ -5043,7 +5056,7 @@ pub unsafe fn q6_vw_vfmv_vw(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vconv_hf_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_equals_vqf16(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_equals_Vqf16(vu: HvxVector) -> HvxVector { vconv_hf_qf16(vu) } @@ -5055,7 +5068,7 @@ pub unsafe fn q6_vhf_equals_vqf16(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vconv_hf_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn 
q6_vhf_equals_wqf32(vuu: HvxVectorPair) -> HvxVector { +pub unsafe fn Q6_Vhf_equals_Wqf32(vuu: HvxVectorPair) -> HvxVector { vconv_hf_qf32(vuu) } @@ -5067,7 +5080,7 @@ pub unsafe fn q6_vhf_equals_wqf32(vuu: HvxVectorPair) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vconv_sf_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_equals_vqf32(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_equals_Vqf32(vu: HvxVector) -> HvxVector { vconv_sf_qf32(vu) } @@ -5079,7 +5092,7 @@ pub unsafe fn q6_vsf_equals_vqf32(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_b_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vcvt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vcvt_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vcvt_b_hf(vu, vv) } @@ -5091,7 +5104,7 @@ pub unsafe fn q6_vb_vcvt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_h_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vcvt_vhf(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vcvt_Vhf(vu: HvxVector) -> HvxVector { vcvt_h_hf(vu) } @@ -5103,7 +5116,7 @@ pub unsafe fn q6_vh_vcvt_vhf(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_b))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_whf_vcvt_vb(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Whf_vcvt_Vb(vu: HvxVector) -> HvxVectorPair { vcvt_hf_b(vu) } @@ -5115,7 +5128,7 @@ pub unsafe fn q6_whf_vcvt_vb(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, 
assert_instr(vcvt_hf_h))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vcvt_vh(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vcvt_Vh(vu: HvxVector) -> HvxVector { vcvt_hf_h(vu) } @@ -5127,7 +5140,7 @@ pub unsafe fn q6_vhf_vcvt_vh(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vcvt_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vcvt_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vcvt_hf_sf(vu, vv) } @@ -5139,7 +5152,7 @@ pub unsafe fn q6_vhf_vcvt_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_ub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_whf_vcvt_vub(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Whf_vcvt_Vub(vu: HvxVector) -> HvxVectorPair { vcvt_hf_ub(vu) } @@ -5151,7 +5164,7 @@ pub unsafe fn q6_whf_vcvt_vub(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_uh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vcvt_vuh(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vcvt_Vuh(vu: HvxVector) -> HvxVector { vcvt_hf_uh(vu) } @@ -5163,7 +5176,7 @@ pub unsafe fn q6_vhf_vcvt_vuh(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wsf_vcvt_vhf(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wsf_vcvt_Vhf(vu: HvxVector) -> HvxVectorPair { vcvt_sf_hf(vu) } @@ -5175,7 +5188,7 @@ pub unsafe fn q6_wsf_vcvt_vhf(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = 
"hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_ub_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vcvt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vcvt_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vcvt_ub_hf(vu, vv) } @@ -5187,7 +5200,7 @@ pub unsafe fn q6_vub_vcvt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_uh_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vcvt_vhf(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vcvt_Vhf(vu: HvxVector) -> HvxVector { vcvt_uh_hf(vu) } @@ -5199,7 +5212,7 @@ pub unsafe fn q6_vuh_vcvt_vhf(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vdmpy_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vdmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vdmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vdmpy_sf_hf(vu, vv) } @@ -5211,7 +5224,7 @@ pub unsafe fn q6_vsf_vdmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vdmpy_sf_hf_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vdmpyacc_vsfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vdmpyacc_VsfVhfVhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { vdmpy_sf_hf_acc(vx, vu, vv) } @@ -5223,7 +5236,7 @@ pub unsafe fn q6_vsf_vdmpyacc_vsfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfmax_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn 
q6_vhf_vfmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vfmax_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vfmax_hf(vu, vv) } @@ -5235,7 +5248,7 @@ pub unsafe fn q6_vhf_vfmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfmax_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vfmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vfmax_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vfmax_sf(vu, vv) } @@ -5247,7 +5260,7 @@ pub unsafe fn q6_vsf_vfmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfmin_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vfmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vfmin_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vfmin_hf(vu, vv) } @@ -5259,7 +5272,7 @@ pub unsafe fn q6_vhf_vfmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfmin_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vfmin_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vfmin_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vfmin_sf(vu, vv) } @@ -5271,7 +5284,7 @@ pub unsafe fn q6_vsf_vfmin_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfneg_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vfneg_vhf(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vfneg_Vhf(vu: HvxVector) -> HvxVector { vfneg_hf(vu) } @@ -5283,7 +5296,7 @@ pub unsafe fn q6_vhf_vfneg_vhf(vu: 
HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfneg_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vfneg_vsf(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vfneg_Vsf(vu: HvxVector) -> HvxVector { vfneg_sf(vu) } @@ -5295,7 +5308,7 @@ pub unsafe fn q6_vsf_vfneg_vsf(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmax_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vmax_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vmax_hf(vu, vv) } @@ -5307,7 +5320,7 @@ pub unsafe fn q6_vhf_vmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmax_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vmax_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vmax_sf(vu, vv) } @@ -5319,7 +5332,7 @@ pub unsafe fn q6_vsf_vmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmin_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vmin_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vmin_hf(vu, vv) } @@ -5331,7 +5344,7 @@ pub unsafe fn q6_vhf_vmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmin_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vmin_vsfvsf(vu: HvxVector, vv: 
HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vmin_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vmin_sf(vu, vv) } @@ -5343,7 +5356,7 @@ pub unsafe fn q6_vsf_vmin_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_hf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpy_hf_hf(vu, vv) } @@ -5355,7 +5368,7 @@ pub unsafe fn q6_vhf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_hf_hf_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vmpyacc_vhfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vmpyacc_VhfVhfVhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { vmpy_hf_hf_acc(vx, vu, vv) } @@ -5367,7 +5380,7 @@ pub unsafe fn q6_vhf_vmpyacc_vhfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf16_vmpy_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpy_qf16(vu, vv) } @@ -5379,7 +5392,7 @@ pub unsafe fn q6_vqf16_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf16_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf16_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { 
vmpy_qf16_hf(vu, vv) } @@ -5391,7 +5404,7 @@ pub unsafe fn q6_vqf16_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf16_mix_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vmpy_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf16_vmpy_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpy_qf16_mix_hf(vu, vv) } @@ -5403,7 +5416,7 @@ pub unsafe fn q6_vqf16_vmpy_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf32_vmpy_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf32_vmpy_Vqf32Vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpy_qf32(vu, vv) } @@ -5415,7 +5428,7 @@ pub unsafe fn q6_vqf32_vmpy_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wqf32_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wqf32_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpy_qf32_hf(vu, vv) } @@ -5427,7 +5440,7 @@ pub unsafe fn q6_wqf32_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPai #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32_mix_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wqf32_vmpy_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wqf32_vmpy_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpy_qf32_mix_hf(vu, vv) } @@ -5439,7 +5452,7 @@ pub unsafe fn q6_wqf32_vmpy_vqf16vhf(vu: HvxVector, 
vv: HvxVector) -> HvxVectorP #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wqf32_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wqf32_vmpy_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpy_qf32_qf16(vu, vv) } @@ -5451,7 +5464,7 @@ pub unsafe fn q6_wqf32_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf32_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf32_vmpy_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpy_qf32_sf(vu, vv) } @@ -5463,7 +5476,7 @@ pub unsafe fn q6_vqf32_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wsf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wsf_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpy_sf_hf(vu, vv) } @@ -5475,7 +5488,7 @@ pub unsafe fn q6_wsf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_sf_hf_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wsf_vmpyacc_wsfvhfvhf( +pub unsafe fn Q6_Wsf_vmpyacc_WsfVhfVhf( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -5491,7 +5504,7 @@ pub unsafe fn q6_wsf_vmpyacc_wsfvhfvhf( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_sf_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub 
unsafe fn q6_vsf_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vmpy_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpy_sf_sf(vu, vv) } @@ -5503,7 +5516,7 @@ pub unsafe fn q6_vsf_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf16_vsub_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vsub_hf(vu, vv) } @@ -5515,7 +5528,7 @@ pub unsafe fn q6_vqf16_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_hf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vsub_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vsub_hf_hf(vu, vv) } @@ -5527,7 +5540,7 @@ pub unsafe fn q6_vhf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vsub_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf16_vsub_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { vsub_qf16(vu, vv) } @@ -5539,7 +5552,7 @@ pub unsafe fn q6_vqf16_vsub_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_qf16_mix))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vsub_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf16_vsub_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { 
vsub_qf16_mix(vu, vv) } @@ -5551,7 +5564,7 @@ pub unsafe fn q6_vqf16_vsub_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf32_vsub_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf32_vsub_Vqf32Vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { vsub_qf32(vu, vv) } @@ -5563,7 +5576,7 @@ pub unsafe fn q6_vqf32_vsub_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_qf32_mix))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf32_vsub_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf32_vsub_Vqf32Vsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vsub_qf32_mix(vu, vv) } @@ -5575,7 +5588,7 @@ pub unsafe fn q6_vqf32_vsub_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf32_vsub_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf32_vsub_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vsub_sf(vu, vv) } @@ -5587,7 +5600,7 @@ pub unsafe fn q6_vqf32_vsub_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wsf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wsf_vsub_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vsub_sf_hf(vu, vv) } @@ -5599,7 +5612,7 @@ pub unsafe fn q6_wsf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_sf_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vsub_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vsub_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vsub_sf_sf(vu, vv) } @@ -5611,7 +5624,7 @@ pub unsafe fn q6_vsf_vsub_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vasrvuhubrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vasr_wuhvub_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vasr_WuhVub_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { vasrvuhubrndsat(vuu, vv) } @@ -5623,7 +5636,7 @@ pub unsafe fn q6_vub_vasr_wuhvub_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> H #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vasrvuhubsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vasr_wuhvub_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vasr_WuhVub_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { vasrvuhubsat(vuu, vv) } @@ -5635,7 +5648,7 @@ pub unsafe fn q6_vub_vasr_wuhvub_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVe #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vasrvwuhrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vasr_wwvuh_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vasr_WwVuh_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { vasrvwuhrndsat(vuu, vv) } @@ -5647,7 +5660,7 @@ pub unsafe fn q6_vuh_vasr_wwvuh_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> Hv #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] 
#[cfg_attr(test, assert_instr(vasrvwuhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vasr_wwvuh_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vasr_WwVuh_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { vasrvwuhsat(vuu, vv) } @@ -5659,7 +5672,7 @@ pub unsafe fn q6_vuh_vasr_wwvuh_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vmpyuhvs))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vmpy_vuhvuh_rs16(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vmpy_VuhVuh_rs16(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyuhvs(vu, vv) } @@ -5671,7 +5684,7 @@ pub unsafe fn q6_vuh_vmpy_vuhvuh_rs16(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] #[cfg_attr(test, assert_instr(vconv_h_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_equals_vhf(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_equals_Vhf(vu: HvxVector) -> HvxVector { vconv_h_hf(vu) } @@ -5683,7 +5696,7 @@ pub unsafe fn q6_vh_equals_vhf(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] #[cfg_attr(test, assert_instr(vconv_hf_h))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_equals_vh(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_equals_Vh(vu: HvxVector) -> HvxVector { vconv_hf_h(vu) } @@ -5695,7 +5708,7 @@ pub unsafe fn q6_vhf_equals_vh(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] #[cfg_attr(test, assert_instr(vconv_sf_w))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_equals_vw(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_equals_Vw(vu: HvxVector) -> HvxVector { vconv_sf_w(vu) } @@ -5707,7 +5720,7 
@@ pub unsafe fn q6_vsf_equals_vw(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] #[cfg_attr(test, assert_instr(vconv_w_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_equals_vsf(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_equals_Vsf(vu: HvxVector) -> HvxVector { vconv_w_sf(vu) } @@ -5719,7 +5732,7 @@ pub unsafe fn q6_vw_equals_vsf(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(get_qfext))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vgetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vgetqfext_VR(vu: HvxVector, rt: i32) -> HvxVector { get_qfext(vu, rt) } @@ -5731,7 +5744,7 @@ pub unsafe fn q6_v_vgetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(set_qfext))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vsetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vsetqfext_VR(vu: HvxVector, rt: i32) -> HvxVector { set_qfext(vu, rt) } @@ -5743,7 +5756,7 @@ pub unsafe fn q6_v_vsetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vabs_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vabs_v(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vabs_V(vu: HvxVector) -> HvxVector { vabs_f8(vu) } @@ -5755,7 +5768,7 @@ pub unsafe fn q6_v_vabs_v(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vcvt2_hf_b))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_whf_vcvt2_vb(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Whf_vcvt2_Vb(vu: HvxVector) -> 
HvxVectorPair { vcvt2_hf_b(vu) } @@ -5767,7 +5780,7 @@ pub unsafe fn q6_whf_vcvt2_vb(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vcvt2_hf_ub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_whf_vcvt2_vub(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Whf_vcvt2_Vub(vu: HvxVector) -> HvxVectorPair { vcvt2_hf_ub(vu) } @@ -5779,7 +5792,7 @@ pub unsafe fn q6_whf_vcvt2_vub(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vcvt_hf_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_whf_vcvt_v(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Whf_vcvt_V(vu: HvxVector) -> HvxVectorPair { vcvt_hf_f8(vu) } @@ -5791,7 +5804,7 @@ pub unsafe fn q6_whf_vcvt_v(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vfmax_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vfmax_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vfmax_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { vfmax_f8(vu, vv) } @@ -5803,7 +5816,7 @@ pub unsafe fn q6_v_vfmax_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vfmin_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vfmin_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vfmin_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { vfmin_f8(vu, vv) } @@ -5815,7 +5828,7 @@ pub unsafe fn q6_v_vfmin_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vfneg_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn 
q6_v_vfneg_v(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vfneg_V(vu: HvxVector) -> HvxVector { vfneg_f8(vu) } @@ -5827,7 +5840,7 @@ pub unsafe fn q6_v_vfneg_v(vu: HvxVector) -> HvxVector { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_and_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { +pub unsafe fn Q6_Q_and_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { core::mem::transmute::(vandqrt( pred_and( vandvrt(core::mem::transmute::(qs), -1), @@ -5845,7 +5858,7 @@ pub unsafe fn q6_q_and_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_and_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { +pub unsafe fn Q6_Q_and_QQn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { core::mem::transmute::(vandqrt( pred_and_n( vandvrt(core::mem::transmute::(qs), -1), @@ -5863,7 +5876,7 @@ pub unsafe fn q6_q_and_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPre #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_not_q(qs: HvxVectorPred) -> HvxVectorPred { +pub unsafe fn Q6_Q_not_Q(qs: HvxVectorPred) -> HvxVectorPred { core::mem::transmute::(vandqrt( pred_not(vandvrt( core::mem::transmute::(qs), @@ -5881,7 +5894,7 @@ pub unsafe fn q6_q_not_q(qs: HvxVectorPred) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_or_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { +pub unsafe fn Q6_Q_or_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { core::mem::transmute::(vandqrt( 
pred_or( vandvrt(core::mem::transmute::(qs), -1), @@ -5899,7 +5912,7 @@ pub unsafe fn q6_q_or_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_or_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { +pub unsafe fn Q6_Q_or_QQn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { core::mem::transmute::(vandqrt( pred_or_n( vandvrt(core::mem::transmute::(qs), -1), @@ -5917,7 +5930,7 @@ pub unsafe fn q6_q_or_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vsetq_r(rt: i32) -> HvxVectorPred { +pub unsafe fn Q6_Q_vsetq_R(rt: i32) -> HvxVectorPred { core::mem::transmute::(vandqrt(pred_scalar2(rt), -1)) } @@ -5929,7 +5942,7 @@ pub unsafe fn q6_q_vsetq_r(rt: i32) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_xor_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { +pub unsafe fn Q6_Q_xor_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { core::mem::transmute::(vandqrt( pred_xor( vandvrt(core::mem::transmute::(qs), -1), @@ -5947,7 +5960,7 @@ pub unsafe fn q6_q_xor_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vmem_qnriv(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { +pub unsafe fn Q6_vmem_QnRIV(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { vS32b_nqpred_ai( vandvrt(core::mem::transmute::(qv), -1), rt, @@ -5963,7 +5976,7 @@ pub unsafe fn q6_vmem_qnriv(qv: 
HvxVectorPred, rt: *mut HvxVector, vs: HvxVector #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vmem_qnriv_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { +pub unsafe fn Q6_vmem_QnRIV_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { vS32b_nt_nqpred_ai( vandvrt(core::mem::transmute::(qv), -1), rt, @@ -5979,7 +5992,7 @@ pub unsafe fn q6_vmem_qnriv_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVec #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vmem_qriv_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { +pub unsafe fn Q6_vmem_QRIV_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { vS32b_nt_qpred_ai( vandvrt(core::mem::transmute::(qv), -1), rt, @@ -5995,7 +6008,7 @@ pub unsafe fn q6_vmem_qriv_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVect #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vmem_qriv(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { +pub unsafe fn Q6_vmem_QRIV(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { vS32b_qpred_ai( vandvrt(core::mem::transmute::(qv), -1), rt, @@ -6011,7 +6024,7 @@ pub unsafe fn q6_vmem_qriv(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_condacc_qnvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_condacc_QnVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vaddbnq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6027,7 +6040,7 @@ pub unsafe fn q6_vb_condacc_qnvbvb(qv: HvxVectorPred, vx: 
HvxVector, vu: HvxVect #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_condacc_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_condacc_QVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vaddbq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6043,7 +6056,7 @@ pub unsafe fn q6_vb_condacc_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_condacc_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_condacc_QnVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vaddhnq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6059,7 +6072,7 @@ pub unsafe fn q6_vh_condacc_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_condacc_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_condacc_QVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vaddhq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6075,7 +6088,7 @@ pub unsafe fn q6_vh_condacc_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_condacc_qnvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_condacc_QnVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vaddwnq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6091,7 +6104,7 @@ pub unsafe fn q6_vw_condacc_qnvwvw(qv: 
HvxVectorPred, vx: HvxVector, vu: HvxVect #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_condacc_qvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_condacc_QVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vaddwq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6107,7 +6120,7 @@ pub unsafe fn q6_vw_condacc_qvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vand_qr(qu: HvxVectorPred, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vand_QR(qu: HvxVectorPred, rt: i32) -> HvxVector { vandvrt(core::mem::transmute::(qu), rt) } @@ -6119,7 +6132,7 @@ pub unsafe fn q6_v_vand_qr(qu: HvxVectorPred, rt: i32) -> HvxVector { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vandor_vqr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vandor_VQR(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { vandvrt_acc(vx, core::mem::transmute::(qu), rt) } @@ -6131,7 +6144,7 @@ pub unsafe fn q6_v_vandor_vqr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxV #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vand_vr(vu: HvxVector, rt: i32) -> HvxVectorPred { +pub unsafe fn Q6_Q_vand_VR(vu: HvxVector, rt: i32) -> HvxVectorPred { core::mem::transmute::(vandqrt(vu, rt)) } @@ -6143,7 +6156,7 @@ pub unsafe fn q6_q_vand_vr(vu: HvxVector, rt: i32) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = 
"stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vandor_qvr(qx: HvxVectorPred, vu: HvxVector, rt: i32) -> HvxVectorPred { +pub unsafe fn Q6_Q_vandor_QVR(qx: HvxVectorPred, vu: HvxVector, rt: i32) -> HvxVectorPred { core::mem::transmute::(vandqrt_acc( core::mem::transmute::(qx), vu, @@ -6159,7 +6172,7 @@ pub unsafe fn q6_q_vandor_qvr(qx: HvxVectorPred, vu: HvxVector, rt: i32) -> HvxV #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eq_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_eq_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(veqb(vu, vv), -1)) } @@ -6171,7 +6184,7 @@ pub unsafe fn q6_q_vcmp_eq_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eqand_qvbvb( +pub unsafe fn Q6_Q_vcmp_eqand_QVbVb( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6194,7 +6207,7 @@ pub unsafe fn q6_q_vcmp_eqand_qvbvb( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eqor_qvbvb( +pub unsafe fn Q6_Q_vcmp_eqor_QVbVb( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6217,7 +6230,7 @@ pub unsafe fn q6_q_vcmp_eqor_qvbvb( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eqxacc_qvbvb( +pub unsafe fn Q6_Q_vcmp_eqxacc_QVbVb( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6240,7 +6253,7 @@ pub unsafe fn q6_q_vcmp_eqxacc_qvbvb( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = 
"151523")] -pub unsafe fn q6_q_vcmp_eq_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_eq_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(veqh(vu, vv), -1)) } @@ -6252,7 +6265,7 @@ pub unsafe fn q6_q_vcmp_eq_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eqand_qvhvh( +pub unsafe fn Q6_Q_vcmp_eqand_QVhVh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6275,7 +6288,7 @@ pub unsafe fn q6_q_vcmp_eqand_qvhvh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eqor_qvhvh( +pub unsafe fn Q6_Q_vcmp_eqor_QVhVh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6298,7 +6311,7 @@ pub unsafe fn q6_q_vcmp_eqor_qvhvh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eqxacc_qvhvh( +pub unsafe fn Q6_Q_vcmp_eqxacc_QVhVh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6321,7 +6334,7 @@ pub unsafe fn q6_q_vcmp_eqxacc_qvhvh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eq_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_eq_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(veqw(vu, vv), -1)) } @@ -6333,7 +6346,7 @@ pub unsafe fn q6_q_vcmp_eq_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eqand_qvwvw( +pub unsafe fn 
Q6_Q_vcmp_eqand_QVwVw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6356,7 +6369,7 @@ pub unsafe fn q6_q_vcmp_eqand_qvwvw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eqor_qvwvw( +pub unsafe fn Q6_Q_vcmp_eqor_QVwVw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6379,7 +6392,7 @@ pub unsafe fn q6_q_vcmp_eqor_qvwvw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eqxacc_qvwvw( +pub unsafe fn Q6_Q_vcmp_eqxacc_QVwVw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6402,7 +6415,7 @@ pub unsafe fn q6_q_vcmp_eqxacc_qvwvw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gt_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_gt_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(vgtb(vu, vv), -1)) } @@ -6414,7 +6427,7 @@ pub unsafe fn q6_q_vcmp_gt_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtand_qvbvb( +pub unsafe fn Q6_Q_vcmp_gtand_QVbVb( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6437,7 +6450,7 @@ pub unsafe fn q6_q_vcmp_gtand_qvbvb( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtor_qvbvb( +pub unsafe fn Q6_Q_vcmp_gtor_QVbVb( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6460,7 +6473,7 @@ pub unsafe fn q6_q_vcmp_gtor_qvbvb( #[inline(always)] #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtxacc_qvbvb( +pub unsafe fn Q6_Q_vcmp_gtxacc_QVbVb( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6483,7 +6496,7 @@ pub unsafe fn q6_q_vcmp_gtxacc_qvbvb( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gt_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_gt_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(vgth(vu, vv), -1)) } @@ -6495,7 +6508,7 @@ pub unsafe fn q6_q_vcmp_gt_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtand_qvhvh( +pub unsafe fn Q6_Q_vcmp_gtand_QVhVh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6518,7 +6531,7 @@ pub unsafe fn q6_q_vcmp_gtand_qvhvh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtor_qvhvh( +pub unsafe fn Q6_Q_vcmp_gtor_QVhVh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6541,7 +6554,7 @@ pub unsafe fn q6_q_vcmp_gtor_qvhvh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtxacc_qvhvh( +pub unsafe fn Q6_Q_vcmp_gtxacc_QVhVh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6564,7 +6577,7 @@ pub unsafe fn q6_q_vcmp_gtxacc_qvhvh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gt_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn 
Q6_Q_vcmp_gt_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(vgtub(vu, vv), -1)) } @@ -6576,7 +6589,7 @@ pub unsafe fn q6_q_vcmp_gt_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPred #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtand_qvubvub( +pub unsafe fn Q6_Q_vcmp_gtand_QVubVub( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6599,7 +6612,7 @@ pub unsafe fn q6_q_vcmp_gtand_qvubvub( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtor_qvubvub( +pub unsafe fn Q6_Q_vcmp_gtor_QVubVub( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6622,7 +6635,7 @@ pub unsafe fn q6_q_vcmp_gtor_qvubvub( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtxacc_qvubvub( +pub unsafe fn Q6_Q_vcmp_gtxacc_QVubVub( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6645,7 +6658,7 @@ pub unsafe fn q6_q_vcmp_gtxacc_qvubvub( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gt_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_gt_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(vgtuh(vu, vv), -1)) } @@ -6657,7 +6670,7 @@ pub unsafe fn q6_q_vcmp_gt_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtand_qvuhvuh( +pub unsafe fn Q6_Q_vcmp_gtand_QVuhVuh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ 
-6680,7 +6693,7 @@ pub unsafe fn q6_q_vcmp_gtand_qvuhvuh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtor_qvuhvuh( +pub unsafe fn Q6_Q_vcmp_gtor_QVuhVuh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6703,7 +6716,7 @@ pub unsafe fn q6_q_vcmp_gtor_qvuhvuh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtxacc_qvuhvuh( +pub unsafe fn Q6_Q_vcmp_gtxacc_QVuhVuh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6726,7 +6739,7 @@ pub unsafe fn q6_q_vcmp_gtxacc_qvuhvuh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gt_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_gt_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(vgtuw(vu, vv), -1)) } @@ -6738,7 +6751,7 @@ pub unsafe fn q6_q_vcmp_gt_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtand_qvuwvuw( +pub unsafe fn Q6_Q_vcmp_gtand_QVuwVuw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6761,7 +6774,7 @@ pub unsafe fn q6_q_vcmp_gtand_qvuwvuw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtor_qvuwvuw( +pub unsafe fn Q6_Q_vcmp_gtor_QVuwVuw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6784,7 +6797,7 @@ pub unsafe fn q6_q_vcmp_gtor_qvuwvuw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = 
"stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtxacc_qvuwvuw( +pub unsafe fn Q6_Q_vcmp_gtxacc_QVuwVuw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6807,7 +6820,7 @@ pub unsafe fn q6_q_vcmp_gtxacc_qvuwvuw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gt_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_gt_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(vgtw(vu, vv), -1)) } @@ -6819,7 +6832,7 @@ pub unsafe fn q6_q_vcmp_gt_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtand_qvwvw( +pub unsafe fn Q6_Q_vcmp_gtand_QVwVw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6842,7 +6855,7 @@ pub unsafe fn q6_q_vcmp_gtand_qvwvw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtor_qvwvw( +pub unsafe fn Q6_Q_vcmp_gtor_QVwVw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6865,7 +6878,7 @@ pub unsafe fn q6_q_vcmp_gtor_qvwvw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtxacc_qvwvw( +pub unsafe fn Q6_Q_vcmp_gtxacc_QVwVw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6888,7 +6901,7 @@ pub unsafe fn q6_q_vcmp_gtxacc_qvwvw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vmux_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vmux_QVV(qt: HvxVectorPred, vu: 
HvxVector, vv: HvxVector) -> HvxVector { vmux( vandvrt(core::mem::transmute::(qt), -1), vu, @@ -6904,7 +6917,7 @@ pub unsafe fn q6_v_vmux_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_condnac_qnvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_condnac_QnVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vsubbnq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6920,7 +6933,7 @@ pub unsafe fn q6_vb_condnac_qnvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_condnac_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_condnac_QVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vsubbq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6936,7 +6949,7 @@ pub unsafe fn q6_vb_condnac_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_condnac_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_condnac_QnVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vsubhnq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6952,7 +6965,7 @@ pub unsafe fn q6_vh_condnac_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_condnac_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_condnac_QVhVh(qv: 
HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vsubhq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6968,7 +6981,7 @@ pub unsafe fn q6_vh_condnac_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_condnac_qnvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_condnac_QnVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vsubwnq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6984,7 +6997,7 @@ pub unsafe fn q6_vw_condnac_qnvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_condnac_qvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_condnac_QVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vsubwq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -7000,7 +7013,7 @@ pub unsafe fn q6_vw_condnac_qvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_w_vswap_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_W_vswap_QVV(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vswap( vandvrt(core::mem::transmute::(qt), -1), vu, @@ -7016,7 +7029,7 @@ pub unsafe fn q6_w_vswap_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vsetq2_r(rt: i32) -> HvxVectorPred { +pub unsafe fn Q6_Q_vsetq2_R(rt: i32) -> HvxVectorPred { 
core::mem::transmute::(vandqrt(pred_scalar2v2(rt), -1)) } @@ -7028,7 +7041,7 @@ pub unsafe fn q6_q_vsetq2_r(rt: i32) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_qb_vshuffe_qhqh(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { +pub unsafe fn Q6_Qb_vshuffe_QhQh(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { core::mem::transmute::(vandqrt( shuffeqh( vandvrt(core::mem::transmute::(qs), -1), @@ -7046,7 +7059,7 @@ pub unsafe fn q6_qb_vshuffe_qhqh(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVec #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_qh_vshuffe_qwqw(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { +pub unsafe fn Q6_Qh_vshuffe_QwQw(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { core::mem::transmute::(vandqrt( shuffeqw( vandvrt(core::mem::transmute::(qs), -1), @@ -7064,7 +7077,7 @@ pub unsafe fn q6_qh_vshuffe_qwqw(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVec #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vand_qnr(qu: HvxVectorPred, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vand_QnR(qu: HvxVectorPred, rt: i32) -> HvxVector { vandnqrt( vandvrt(core::mem::transmute::(qu), -1), rt, @@ -7079,7 +7092,7 @@ pub unsafe fn q6_v_vand_qnr(qu: HvxVectorPred, rt: i32) -> HvxVector { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vandor_vqnr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vandor_VQnR(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { vandnqrt_acc( vx, vandvrt(core::mem::transmute::(qu), -1), 
@@ -7095,7 +7108,7 @@ pub unsafe fn q6_v_vandor_vqnr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> Hvx #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vand_qnv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vand_QnV(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { vandvnqv( vandvrt(core::mem::transmute::(qv), -1), vu, @@ -7110,7 +7123,7 @@ pub unsafe fn q6_v_vand_qnv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vand_qv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vand_QV(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { vandvqv( vandvrt(core::mem::transmute::(qv), -1), vu, @@ -7125,7 +7138,7 @@ pub unsafe fn q6_v_vand_qv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vgather_aqrmvh( +pub unsafe fn Q6_vgather_AQRMVh( rs: *mut HvxVector, qs: HvxVectorPred, rt: i32, @@ -7149,7 +7162,7 @@ pub unsafe fn q6_vgather_aqrmvh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vgather_aqrmww( +pub unsafe fn Q6_vgather_AQRMWw( rs: *mut HvxVector, qs: HvxVectorPred, rt: i32, @@ -7173,7 +7186,7 @@ pub unsafe fn q6_vgather_aqrmww( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vgather_aqrmvw( +pub unsafe fn Q6_vgather_AQRMVw( rs: *mut HvxVector, qs: HvxVectorPred, rt: i32, @@ -7197,7 +7210,7 @@ pub unsafe fn q6_vgather_aqrmvw( #[inline(always)] 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_prefixsum_q(qv: HvxVectorPred) -> HvxVector { +pub unsafe fn Q6_Vb_prefixsum_Q(qv: HvxVectorPred) -> HvxVector { vprefixqb(vandvrt( core::mem::transmute::(qv), -1, @@ -7212,7 +7225,7 @@ pub unsafe fn q6_vb_prefixsum_q(qv: HvxVectorPred) -> HvxVector { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_prefixsum_q(qv: HvxVectorPred) -> HvxVector { +pub unsafe fn Q6_Vh_prefixsum_Q(qv: HvxVectorPred) -> HvxVector { vprefixqh(vandvrt( core::mem::transmute::(qv), -1, @@ -7227,7 +7240,7 @@ pub unsafe fn q6_vh_prefixsum_q(qv: HvxVectorPred) -> HvxVector { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_prefixsum_q(qv: HvxVectorPred) -> HvxVector { +pub unsafe fn Q6_Vw_prefixsum_Q(qv: HvxVectorPred) -> HvxVector { vprefixqw(vandvrt( core::mem::transmute::(qv), -1, @@ -7242,7 +7255,7 @@ pub unsafe fn q6_vw_prefixsum_q(qv: HvxVectorPred) -> HvxVector { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vscatter_qrmvhv( +pub unsafe fn Q6_vscatter_QRMVhV( qs: HvxVectorPred, rt: i32, mu: i32, @@ -7266,7 +7279,7 @@ pub unsafe fn q6_vscatter_qrmvhv( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vscatter_qrmwwv( +pub unsafe fn Q6_vscatter_QRMWwV( qs: HvxVectorPred, rt: i32, mu: i32, @@ -7290,7 +7303,7 @@ pub unsafe fn q6_vscatter_qrmwwv( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = 
"stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vscatter_qrmvwv( +pub unsafe fn Q6_vscatter_QRMVwV( qs: HvxVectorPred, rt: i32, mu: i32, @@ -7314,7 +7327,7 @@ pub unsafe fn q6_vscatter_qrmvwv( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vadd_vwvwq_carry_sat( +pub unsafe fn Q6_Vw_vadd_VwVwQ_carry_sat( vu: HvxVector, vv: HvxVector, qs: HvxVectorPred, @@ -7334,7 +7347,7 @@ pub unsafe fn q6_vw_vadd_vwvwq_carry_sat( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_gt_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(vgthf(vu, vv), -1)) } @@ -7346,7 +7359,7 @@ pub unsafe fn q6_q_vcmp_gt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtand_qvhfvhf( +pub unsafe fn Q6_Q_vcmp_gtand_QVhfVhf( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -7369,7 +7382,7 @@ pub unsafe fn q6_q_vcmp_gtand_qvhfvhf( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtor_qvhfvhf( +pub unsafe fn Q6_Q_vcmp_gtor_QVhfVhf( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -7392,7 +7405,7 @@ pub unsafe fn q6_q_vcmp_gtor_qvhfvhf( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtxacc_qvhfvhf( +pub unsafe fn Q6_Q_vcmp_gtxacc_QVhfVhf( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -7415,7 +7428,7 @@ 
pub unsafe fn q6_q_vcmp_gtxacc_qvhfvhf( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gt_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_gt_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(vgtsf(vu, vv), -1)) } @@ -7427,7 +7440,7 @@ pub unsafe fn q6_q_vcmp_gt_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtand_qvsfvsf( +pub unsafe fn Q6_Q_vcmp_gtand_QVsfVsf( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -7450,7 +7463,7 @@ pub unsafe fn q6_q_vcmp_gtand_qvsfvsf( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtor_qvsfvsf( +pub unsafe fn Q6_Q_vcmp_gtor_QVsfVsf( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -7473,7 +7486,7 @@ pub unsafe fn q6_q_vcmp_gtor_qvsfvsf( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtxacc_qvsfvsf( +pub unsafe fn Q6_Q_vcmp_gtxacc_QVsfVsf( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, diff --git a/library/stdarch/crates/core_arch/src/hexagon/v64.rs b/library/stdarch/crates/core_arch/src/hexagon/v64.rs index 023a8711d21f3..517a807db4ae2 100644 --- a/library/stdarch/crates/core_arch/src/hexagon/v64.rs +++ b/library/stdarch/crates/core_arch/src/hexagon/v64.rs @@ -15,6 +15,18 @@ //! //! To use this module, compile with `-C target-feature=+hvx-length64b`. //! +//! ## Naming Convention +//! +//! Function names preserve the original Q6 naming case because the convention +//! 
uses case to distinguish register types: +//! - `W` (uppercase) = vector pair (`HvxVectorPair`) +//! - `V` (uppercase) = vector (`HvxVector`) +//! - `Q` (uppercase) = predicate (`HvxVectorPred`) +//! - `R` = scalar register (`i32`) +//! +//! For example, `Q6_W_vcombine_VV` operates on a vector pair while +//! `Q6_V_hi_W` extracts a vector from a pair. +//! //! ## Architecture Versions //! //! Different intrinsics require different HVX architecture versions. Use the @@ -31,6 +43,7 @@ //! Each version includes all features from previous versions. #![allow(non_camel_case_types)] +#![allow(non_snake_case)] #[cfg(test)] use stdarch_test::assert_instr; @@ -1034,7 +1047,7 @@ unsafe extern "unadjusted" { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(extractw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_r_vextract_vr(vu: HvxVector, rs: i32) -> i32 { +pub unsafe fn Q6_R_vextract_VR(vu: HvxVector, rs: i32) -> i32 { extractw(vu, rs) } @@ -1046,7 +1059,7 @@ pub unsafe fn q6_r_vextract_vr(vu: HvxVector, rs: i32) -> i32 { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(hi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_hi_w(vss: HvxVectorPair) -> HvxVector { +pub unsafe fn Q6_V_hi_W(vss: HvxVectorPair) -> HvxVector { hi(vss) } @@ -1058,7 +1071,7 @@ pub unsafe fn q6_v_hi_w(vss: HvxVectorPair) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(lo))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_lo_w(vss: HvxVectorPair) -> HvxVector { +pub unsafe fn Q6_V_lo_W(vss: HvxVectorPair) -> HvxVector { lo(vss) } @@ -1070,7 +1083,7 @@ pub unsafe fn q6_v_lo_w(vss: HvxVectorPair) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(lvsplatw))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vsplat_r(rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vsplat_R(rt: i32) -> HvxVector { lvsplatw(rt) } @@ -1082,7 +1095,7 @@ pub unsafe fn q6_v_vsplat_r(rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsdiffh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vabsdiff_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vabsdiff_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vabsdiffh(vu, vv) } @@ -1094,7 +1107,7 @@ pub unsafe fn q6_vuh_vabsdiff_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsdiffub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vabsdiff_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vabsdiff_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { vabsdiffub(vu, vv) } @@ -1106,7 +1119,7 @@ pub unsafe fn q6_vub_vabsdiff_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsdiffuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vabsdiff_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vabsdiff_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { vabsdiffuh(vu, vv) } @@ -1118,7 +1131,7 @@ pub unsafe fn q6_vuh_vabsdiff_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsdiffw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vabsdiff_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuw_vabsdiff_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vabsdiffw(vu, vv) } @@ 
-1130,7 +1143,7 @@ pub unsafe fn q6_vuw_vabsdiff_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vabs_vh(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vabs_Vh(vu: HvxVector) -> HvxVector { vabsh(vu) } @@ -1142,7 +1155,7 @@ pub unsafe fn q6_vh_vabs_vh(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsh_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vabs_vh_sat(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vabs_Vh_sat(vu: HvxVector) -> HvxVector { vabsh_sat(vu) } @@ -1154,7 +1167,7 @@ pub unsafe fn q6_vh_vabs_vh_sat(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vabs_vw(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vabs_Vw(vu: HvxVector) -> HvxVector { vabsw(vu) } @@ -1166,7 +1179,7 @@ pub unsafe fn q6_vw_vabs_vw(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsw_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vabs_vw_sat(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vabs_Vw_sat(vu: HvxVector) -> HvxVector { vabsw_sat(vu) } @@ -1178,7 +1191,7 @@ pub unsafe fn q6_vw_vabs_vw_sat(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vadd_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vadd_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vaddb(vu, vv) } @@ 
-1190,7 +1203,7 @@ pub unsafe fn q6_vb_vadd_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddb_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wb_vadd_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wb_vadd_WbWb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vaddb_dv(vuu, vvv) } @@ -1202,7 +1215,7 @@ pub unsafe fn q6_wb_vadd_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vadd_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vaddh(vu, vv) } @@ -1214,7 +1227,7 @@ pub unsafe fn q6_vh_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddh_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vadd_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vadd_WhWh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vaddh_dv(vuu, vvv) } @@ -1226,7 +1239,7 @@ pub unsafe fn q6_wh_vadd_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vadd_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vadd_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vaddhsat(vu, vv) } @@ -1238,7 +1251,7 @@ pub unsafe fn q6_vh_vadd_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = 
"hvxv60"))] #[cfg_attr(test, assert_instr(vaddhsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vadd_whwh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vadd_WhWh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vaddhsat_dv(vuu, vvv) } @@ -1250,7 +1263,7 @@ pub unsafe fn q6_wh_vadd_whwh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vadd_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vaddhw(vu, vv) } @@ -1262,7 +1275,7 @@ pub unsafe fn q6_ww_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddubh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vadd_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vadd_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vaddubh(vu, vv) } @@ -1274,7 +1287,7 @@ pub unsafe fn q6_wh_vadd_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddubsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vadd_vubvub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vadd_VubVub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vaddubsat(vu, vv) } @@ -1286,7 +1299,7 @@ pub unsafe fn q6_vub_vadd_vubvub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddubsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn 
q6_wub_vadd_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wub_vadd_WubWub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vaddubsat_dv(vuu, vvv) } @@ -1298,7 +1311,7 @@ pub unsafe fn q6_wub_vadd_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vadduhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vadd_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vadd_VuhVuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vadduhsat(vu, vv) } @@ -1310,7 +1323,7 @@ pub unsafe fn q6_vuh_vadd_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vadduhsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuh_vadd_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wuh_vadd_WuhWuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vadduhsat_dv(vuu, vvv) } @@ -1322,7 +1335,7 @@ pub unsafe fn q6_wuh_vadd_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vadduhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vadd_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vadd_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vadduhw(vu, vv) } @@ -1334,7 +1347,7 @@ pub unsafe fn q6_ww_vadd_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vadd_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn 
Q6_Vw_vadd_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { simd_add(vu, vv) } @@ -1346,7 +1359,7 @@ pub unsafe fn q6_vw_vadd_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddw_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vadd_wwww(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vadd_WwWw(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vaddw_dv(vuu, vvv) } @@ -1358,7 +1371,7 @@ pub unsafe fn q6_ww_vadd_wwww(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddwsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vadd_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vadd_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vaddwsat(vu, vv) } @@ -1370,7 +1383,7 @@ pub unsafe fn q6_vw_vadd_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddwsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vadd_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vadd_WwWw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vaddwsat_dv(vuu, vvv) } @@ -1382,7 +1395,7 @@ pub unsafe fn q6_ww_vadd_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(valignb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_valign_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_valign_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { valignb(vu, vv, rt) } @@ -1394,7 +1407,7 @@ pub unsafe fn 
q6_v_valign_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(valignbi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_valign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { +pub unsafe fn Q6_V_valign_VVI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { valignbi(vu, vv, iu3) } @@ -1406,7 +1419,7 @@ pub unsafe fn q6_v_valign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vand))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vand_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vand_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { simd_and(vu, vv) } @@ -1418,7 +1431,7 @@ pub unsafe fn q6_v_vand_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vasl_vhr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vasl_VhR(vu: HvxVector, rt: i32) -> HvxVector { vaslh(vu, rt) } @@ -1430,7 +1443,7 @@ pub unsafe fn q6_vh_vasl_vhr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vasl_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vasl_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vaslhv(vu, vv) } @@ -1442,7 +1455,7 @@ pub unsafe fn q6_vh_vasl_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe 
fn q6_vw_vasl_vwr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vasl_VwR(vu: HvxVector, rt: i32) -> HvxVector { vaslw(vu, rt) } @@ -1454,7 +1467,7 @@ pub unsafe fn q6_vw_vasl_vwr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslw_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vaslacc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vaslacc_VwVwR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vaslw_acc(vx, vu, rt) } @@ -1466,7 +1479,7 @@ pub unsafe fn q6_vw_vaslacc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxV #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslwv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vasl_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vasl_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vaslwv(vu, vv) } @@ -1478,7 +1491,7 @@ pub unsafe fn q6_vw_vasl_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vasr_vhr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vasr_VhR(vu: HvxVector, rt: i32) -> HvxVector { vasrh(vu, rt) } @@ -1490,7 +1503,7 @@ pub unsafe fn q6_vh_vasr_vhr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrhbrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vb_vasr_VhVhR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrhbrndsat(vu, vv, rt) } @@ -1502,7 +1515,7 @@ pub unsafe fn 
q6_vb_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrhubrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vub_vasr_VhVhR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrhubrndsat(vu, vv, rt) } @@ -1514,7 +1527,7 @@ pub unsafe fn q6_vub_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) - #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrhubsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vub_vasr_VhVhR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrhubsat(vu, vv, rt) } @@ -1526,7 +1539,7 @@ pub unsafe fn q6_vub_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> Hv #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vasr_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vasr_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vasrhv(vu, vv) } @@ -1538,7 +1551,7 @@ pub unsafe fn q6_vh_vasr_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vasr_vwr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vasr_VwR(vu: HvxVector, rt: i32) -> HvxVector { vasrw(vu, rt) } @@ -1550,7 +1563,7 @@ pub unsafe fn q6_vw_vasr_vwr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, 
assert_instr(vasrw_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vasracc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vasracc_VwVwR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vasrw_acc(vx, vu, rt) } @@ -1562,7 +1575,7 @@ pub unsafe fn q6_vw_vasracc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxV #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vasr_vwvwr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vasr_VwVwR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrwh(vu, vv, rt) } @@ -1574,7 +1587,7 @@ pub unsafe fn q6_vh_vasr_vwvwr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwhrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vasr_VwVwR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrwhrndsat(vu, vv, rt) } @@ -1586,7 +1599,7 @@ pub unsafe fn q6_vh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vasr_vwvwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vasr_VwVwR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrwhsat(vu, vv, rt) } @@ -1598,7 +1611,7 @@ pub unsafe fn q6_vh_vasr_vwvwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwuhsat))] #[unstable(feature = "stdarch_hexagon", issue = 
"151523")] -pub unsafe fn q6_vuh_vasr_vwvwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuh_vasr_VwVwR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrwuhsat(vu, vv, rt) } @@ -1610,7 +1623,7 @@ pub unsafe fn q6_vuh_vasr_vwvwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> Hv #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vasr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vasr_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vasrwv(vu, vv) } @@ -1622,7 +1635,7 @@ pub unsafe fn q6_vw_vasr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vassign))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_equals_v(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_equals_V(vu: HvxVector) -> HvxVector { vassign(vu) } @@ -1634,7 +1647,7 @@ pub unsafe fn q6_v_equals_v(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vassignp))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_w_equals_w(vuu: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_W_equals_W(vuu: HvxVectorPair) -> HvxVectorPair { vassignp(vuu) } @@ -1646,7 +1659,7 @@ pub unsafe fn q6_w_equals_w(vuu: HvxVectorPair) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vavg_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vavgh(vu, vv) } @@ -1658,7 +1671,7 @@ pub unsafe fn q6_vh_vavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavghrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vavg_vhvh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vavg_VhVh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { vavghrnd(vu, vv) } @@ -1670,7 +1683,7 @@ pub unsafe fn q6_vh_vavg_vhvh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vavg_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { vavgub(vu, vv) } @@ -1682,7 +1695,7 @@ pub unsafe fn q6_vub_vavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgubrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vavg_vubvub_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vavg_VubVub_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { vavgubrnd(vu, vv) } @@ -1694,7 +1707,7 @@ pub unsafe fn q6_vub_vavg_vubvub_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavguh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vavg_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vavg_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { vavguh(vu, vv) } @@ -1706,7 +1719,7 @@ pub unsafe fn q6_vuh_vavg_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavguhrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn 
q6_vuh_vavg_vuhvuh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vavg_VuhVuh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { vavguhrnd(vu, vv) } @@ -1718,7 +1731,7 @@ pub unsafe fn q6_vuh_vavg_vuhvuh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vavg_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vavgw(vu, vv) } @@ -1730,7 +1743,7 @@ pub unsafe fn q6_vw_vavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgwrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vavg_vwvw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vavg_VwVw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { vavgwrnd(vu, vv) } @@ -1742,7 +1755,7 @@ pub unsafe fn q6_vw_vavg_vwvw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vcl0h))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vcl0_vuh(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vcl0_Vuh(vu: HvxVector) -> HvxVector { vcl0h(vu) } @@ -1754,7 +1767,7 @@ pub unsafe fn q6_vuh_vcl0_vuh(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vcl0w))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vcl0_vuw(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuw_vcl0_Vuw(vu: HvxVector) -> HvxVector { vcl0w(vu) } @@ -1766,7 +1779,7 @@ pub unsafe fn q6_vuw_vcl0_vuw(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = 
"hvxv60"))] #[cfg_attr(test, assert_instr(vcombine))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_w_vcombine_vv(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_W_vcombine_VV(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vcombine(vu, vv) } @@ -1778,7 +1791,7 @@ pub unsafe fn q6_w_vcombine_vv(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vd0))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vzero() -> HvxVector { +pub unsafe fn Q6_V_vzero() -> HvxVector { vd0() } @@ -1790,7 +1803,7 @@ pub unsafe fn q6_v_vzero() -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdealb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vdeal_vb(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vdeal_Vb(vu: HvxVector) -> HvxVector { vdealb(vu) } @@ -1802,7 +1815,7 @@ pub unsafe fn q6_vb_vdeal_vb(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdealb4w))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vdeale_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vdeale_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vdealb4w(vu, vv) } @@ -1814,7 +1827,7 @@ pub unsafe fn q6_vb_vdeale_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdealh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vdeal_vh(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vdeal_Vh(vu: HvxVector) -> HvxVector { vdealh(vu) } @@ -1826,7 +1839,7 @@ pub unsafe fn q6_vh_vdeal_vh(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = 
"hvxv60"))] #[cfg_attr(test, assert_instr(vdealvdd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_w_vdeal_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_W_vdeal_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { vdealvdd(vu, vv, rt) } @@ -1838,7 +1851,7 @@ pub unsafe fn q6_w_vdeal_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdelta))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vdelta_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { vdelta(vu, vv) } @@ -1850,7 +1863,7 @@ pub unsafe fn q6_v_vdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpybus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vdmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vdmpy_VubRb(vu: HvxVector, rt: i32) -> HvxVector { vdmpybus(vu, rt) } @@ -1862,7 +1875,7 @@ pub unsafe fn q6_vh_vdmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpybus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vdmpyacc_vhvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vdmpyacc_VhVubRb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vdmpybus_acc(vx, vu, rt) } @@ -1874,7 +1887,7 @@ pub unsafe fn q6_vh_vdmpyacc_vhvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> H #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpybus_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vdmpy_wubrb(vuu: HvxVectorPair, 
rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vdmpy_WubRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vdmpybus_dv(vuu, rt) } @@ -1886,7 +1899,7 @@ pub unsafe fn q6_wh_vdmpy_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpybus_dv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vdmpyacc_whwubrb( +pub unsafe fn Q6_Wh_vdmpyacc_WhWubRb( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -1902,7 +1915,7 @@ pub unsafe fn q6_wh_vdmpyacc_whwubrb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpy_vhrb(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpy_VhRb(vu: HvxVector, rt: i32) -> HvxVector { vdmpyhb(vu, rt) } @@ -1914,7 +1927,7 @@ pub unsafe fn q6_vw_vdmpy_vhrb(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpyacc_vwvhrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpyacc_VwVhRb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vdmpyhb_acc(vx, vu, rt) } @@ -1926,7 +1939,7 @@ pub unsafe fn q6_vw_vdmpyacc_vwvhrb(vx: HvxVector, vu: HvxVector, rt: i32) -> Hv #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhb_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vdmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vdmpy_WhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vdmpyhb_dv(vuu, rt) } @@ -1938,7 +1951,7 @@ pub unsafe fn q6_ww_vdmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = 
"hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhb_dv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vdmpyacc_wwwhrb( +pub unsafe fn Q6_Ww_vdmpyacc_WwWhRb( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -1954,7 +1967,7 @@ pub unsafe fn q6_ww_vdmpyacc_wwwhrb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhisat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpy_whrh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpy_WhRh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { vdmpyhisat(vuu, rt) } @@ -1966,7 +1979,7 @@ pub unsafe fn q6_vw_vdmpy_whrh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhisat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpyacc_vwwhrh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpyacc_VwWhRh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector { vdmpyhisat_acc(vx, vuu, rt) } @@ -1978,7 +1991,7 @@ pub unsafe fn q6_vw_vdmpyacc_vwwhrh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpy_vhrh_sat(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpy_VhRh_sat(vu: HvxVector, rt: i32) -> HvxVector { vdmpyhsat(vu, rt) } @@ -1990,7 +2003,7 @@ pub unsafe fn q6_vw_vdmpy_vhrh_sat(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpyacc_vwvhrh_sat(vx: HvxVector, vu: HvxVector, 
rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpyacc_VwVhRh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vdmpyhsat_acc(vx, vu, rt) } @@ -2002,7 +2015,7 @@ pub unsafe fn q6_vw_vdmpyacc_vwvhrh_sat(vx: HvxVector, vu: HvxVector, rt: i32) - #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsuisat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpy_whruh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpy_WhRuh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { vdmpyhsuisat(vuu, rt) } @@ -2014,7 +2027,7 @@ pub unsafe fn q6_vw_vdmpy_whruh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsuisat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpyacc_vwwhruh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpyacc_VwWhRuh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector { vdmpyhsuisat_acc(vx, vuu, rt) } @@ -2026,7 +2039,7 @@ pub unsafe fn q6_vw_vdmpyacc_vwwhruh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsusat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpy_vhruh_sat(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpy_VhRuh_sat(vu: HvxVector, rt: i32) -> HvxVector { vdmpyhsusat(vu, rt) } @@ -2038,7 +2051,7 @@ pub unsafe fn q6_vw_vdmpy_vhruh_sat(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsusat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpyacc_vwvhruh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn 
Q6_Vw_vdmpyacc_VwVhRuh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vdmpyhsusat_acc(vx, vu, rt) } @@ -2050,7 +2063,7 @@ pub unsafe fn q6_vw_vdmpyacc_vwvhruh_sat(vx: HvxVector, vu: HvxVector, rt: i32) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhvsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpy_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpy_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vdmpyhvsat(vu, vv) } @@ -2062,7 +2075,7 @@ pub unsafe fn q6_vw_vdmpy_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhvsat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vdmpyacc_vwvhvh_sat(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vdmpyacc_VwVhVh_sat(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { vdmpyhvsat_acc(vx, vu, vv) } @@ -2074,7 +2087,7 @@ pub unsafe fn q6_vw_vdmpyacc_vwvhvh_sat(vx: HvxVector, vu: HvxVector, vv: HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdsaduh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vdsad_wuhruh(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vdsad_WuhRuh(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vdsaduh(vuu, rt) } @@ -2086,7 +2099,7 @@ pub unsafe fn q6_wuw_vdsad_wuhruh(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdsaduh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vdsadacc_wuwwuhruh( +pub unsafe fn Q6_Wuw_vdsadacc_WuwWuhRuh( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -2102,7 +2115,7 @@ pub unsafe fn 
q6_wuw_vdsadacc_wuwwuhruh( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vinsertwr))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vinsert_vwr(vx: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vinsert_VwR(vx: HvxVector, rt: i32) -> HvxVector { vinsertwr(vx, rt) } @@ -2114,7 +2127,7 @@ pub unsafe fn q6_vw_vinsert_vwr(vx: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlalignb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vlalign_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vlalign_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vlalignb(vu, vv, rt) } @@ -2126,7 +2139,7 @@ pub unsafe fn q6_v_vlalign_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlalignbi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vlalign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { +pub unsafe fn Q6_V_vlalign_VVI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { vlalignbi(vu, vv, iu3) } @@ -2138,7 +2151,7 @@ pub unsafe fn q6_v_vlalign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlsrh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vlsr_vuhr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuh_vlsr_VuhR(vu: HvxVector, rt: i32) -> HvxVector { vlsrh(vu, rt) } @@ -2150,7 +2163,7 @@ pub unsafe fn q6_vuh_vlsr_vuhr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlsrhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] 
-pub unsafe fn q6_vh_vlsr_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vlsr_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vlsrhv(vu, vv) } @@ -2162,7 +2175,7 @@ pub unsafe fn q6_vh_vlsr_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlsrw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vlsr_vuwr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuw_vlsr_VuwR(vu: HvxVector, rt: i32) -> HvxVector { vlsrw(vu, rt) } @@ -2174,7 +2187,7 @@ pub unsafe fn q6_vuw_vlsr_vuwr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlsrwv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vlsr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vlsr_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vlsrwv(vu, vv) } @@ -2186,7 +2199,7 @@ pub unsafe fn q6_vw_vlsr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlutvvb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vlut32_vbvbr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vb_vlut32_VbVbR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vlutvvb(vu, vv, rt) } @@ -2198,7 +2211,7 @@ pub unsafe fn q6_vb_vlut32_vbvbr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVe #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlutvvb_oracc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vlut32or_vbvbvbr( +pub unsafe fn Q6_Vb_vlut32or_VbVbVbR( vx: HvxVector, vu: HvxVector, vv: HvxVector, @@ -2215,7 +2228,7 @@ pub unsafe fn q6_vb_vlut32or_vbvbvbr( #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlutvwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vlut16_vbvhr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vlut16_VbVhR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { vlutvwh(vu, vv, rt) } @@ -2227,7 +2240,7 @@ pub unsafe fn q6_wh_vlut16_vbvhr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVe #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlutvwh_oracc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vlut16or_whvbvhr( +pub unsafe fn Q6_Wh_vlut16or_WhVbVhR( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -2244,7 +2257,7 @@ pub unsafe fn q6_wh_vlut16or_whvbvhr( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmaxh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmax_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vmax_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vmaxh(vu, vv) } @@ -2256,7 +2269,7 @@ pub unsafe fn q6_vh_vmax_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmaxub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vmax_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vmax_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { vmaxub(vu, vv) } @@ -2268,7 +2281,7 @@ pub unsafe fn q6_vub_vmax_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmaxuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vmax_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vmax_VuhVuh(vu: HvxVector, vv: 
HvxVector) -> HvxVector { vmaxuh(vu, vv) } @@ -2280,7 +2293,7 @@ pub unsafe fn q6_vuh_vmax_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmaxw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmax_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmax_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vmaxw(vu, vv) } @@ -2292,7 +2305,7 @@ pub unsafe fn q6_vw_vmax_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vminh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmin_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vmin_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vminh(vu, vv) } @@ -2304,7 +2317,7 @@ pub unsafe fn q6_vh_vmin_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vminub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vmin_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vmin_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { vminub(vu, vv) } @@ -2316,7 +2329,7 @@ pub unsafe fn q6_vub_vmin_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vminuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vmin_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vmin_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { vminuh(vu, vv) } @@ -2328,7 +2341,7 @@ pub unsafe fn q6_vuh_vmin_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, 
assert_instr(vminw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmin_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmin_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vminw(vu, vv) } @@ -2340,7 +2353,7 @@ pub unsafe fn q6_vw_vmin_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpabus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpa_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vmpa_WubRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vmpabus(vuu, rt) } @@ -2352,7 +2365,7 @@ pub unsafe fn q6_wh_vmpa_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpabus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpaacc_whwubrb( +pub unsafe fn Q6_Wh_vmpaacc_WhWubRb( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -2368,7 +2381,7 @@ pub unsafe fn q6_wh_vmpaacc_whwubrb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpabusv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpa_wubwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vmpa_WubWb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vmpabusv(vuu, vvv) } @@ -2380,7 +2393,7 @@ pub unsafe fn q6_wh_vmpa_wubwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpabuuv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpa_wubwub(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vmpa_WubWub(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { 
vmpabuuv(vuu, vvv) } @@ -2392,7 +2405,7 @@ pub unsafe fn q6_wh_vmpa_wubwub(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVe #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpahb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpa_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vmpa_WhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vmpahb(vuu, rt) } @@ -2404,7 +2417,7 @@ pub unsafe fn q6_ww_vmpa_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpahb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpaacc_wwwhrb( +pub unsafe fn Q6_Ww_vmpaacc_WwWhRb( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -2420,7 +2433,7 @@ pub unsafe fn q6_ww_vmpaacc_wwwhrb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vmpy_VubRb(vu: HvxVector, rt: i32) -> HvxVectorPair { vmpybus(vu, rt) } @@ -2432,7 +2445,7 @@ pub unsafe fn q6_wh_vmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpyacc_whvubrb(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vmpyacc_WhVubRb(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair { vmpybus_acc(vxx, vu, rt) } @@ -2444,7 +2457,7 @@ pub unsafe fn q6_wh_vmpyacc_whvubrb(vxx: HvxVectorPair, vu: HvxVector, rt: i32) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybusv))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vmpy_VubVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpybusv(vu, vv) } @@ -2456,7 +2469,7 @@ pub unsafe fn q6_wh_vmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybusv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpyacc_whvubvb( +pub unsafe fn Q6_Wh_vmpyacc_WhVubVb( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -2472,7 +2485,7 @@ pub unsafe fn q6_wh_vmpyacc_whvubvb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vmpy_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpybv(vu, vv) } @@ -2484,7 +2497,7 @@ pub unsafe fn q6_wh_vmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpyacc_whvbvb( +pub unsafe fn Q6_Wh_vmpyacc_WhVbVb( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -2500,7 +2513,7 @@ pub unsafe fn q6_wh_vmpyacc_whvbvb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyewuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmpye_VwVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyewuh(vu, vv) } @@ -2512,7 +2525,7 @@ pub unsafe fn q6_vw_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpy_vhrh(vu: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vmpy_VhRh(vu: HvxVector, rt: i32) -> HvxVectorPair { vmpyh(vu, rt) } @@ -2524,7 +2537,7 @@ pub unsafe fn q6_ww_vmpy_vhrh(vu: HvxVector, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhsat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpyacc_wwvhrh_sat( +pub unsafe fn Q6_Ww_vmpyacc_WwVhRh_sat( vxx: HvxVectorPair, vu: HvxVector, rt: i32, @@ -2540,7 +2553,7 @@ pub unsafe fn q6_ww_vmpyacc_wwvhrh_sat( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhsrs))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmpy_vhrh_s1_rnd_sat(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vmpy_VhRh_s1_rnd_sat(vu: HvxVector, rt: i32) -> HvxVector { vmpyhsrs(vu, rt) } @@ -2552,7 +2565,7 @@ pub unsafe fn q6_vh_vmpy_vhrh_s1_rnd_sat(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhss))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmpy_vhrh_s1_sat(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vmpy_VhRh_s1_sat(vu: HvxVector, rt: i32) -> HvxVector { vmpyhss(vu, rt) } @@ -2564,7 +2577,7 @@ pub unsafe fn q6_vh_vmpy_vhrh_s1_sat(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpy_vhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vmpy_VhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { 
vmpyhus(vu, vv) } @@ -2576,7 +2589,7 @@ pub unsafe fn q6_ww_vmpy_vhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpyacc_wwvhvuh( +pub unsafe fn Q6_Ww_vmpyacc_WwVhVuh( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -2592,7 +2605,7 @@ pub unsafe fn q6_ww_vmpyacc_wwvhvuh( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpy_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vmpy_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpyhv(vu, vv) } @@ -2604,7 +2617,7 @@ pub unsafe fn q6_ww_vmpy_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpyacc_wwvhvh( +pub unsafe fn Q6_Ww_vmpyacc_WwVhVh( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -2620,7 +2633,7 @@ pub unsafe fn q6_ww_vmpyacc_wwvhvh( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhvsrs))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmpy_vhvh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vmpy_VhVh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyhvsrs(vu, vv) } @@ -2632,7 +2645,7 @@ pub unsafe fn q6_vh_vmpy_vhvh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyieoh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyieo_vhvh(vu: HvxVector, vv: HvxVector) -> 
HvxVector { +pub unsafe fn Q6_Vw_vmpyieo_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyieoh(vu, vv) } @@ -2644,7 +2657,7 @@ pub unsafe fn q6_vw_vmpyieo_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiewh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyieacc_vwvwvh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyieacc_VwVwVh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyiewh_acc(vx, vu, vv) } @@ -2656,7 +2669,7 @@ pub unsafe fn q6_vw_vmpyieacc_vwvwvh(vx: HvxVector, vu: HvxVector, vv: HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiewuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyie_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyie_VwVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyiewuh(vu, vv) } @@ -2668,7 +2681,7 @@ pub unsafe fn q6_vw_vmpyie_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiewuh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyieacc_vwvwvuh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyieacc_VwVwVuh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyiewuh_acc(vx, vu, vv) } @@ -2680,7 +2693,7 @@ pub unsafe fn q6_vw_vmpyieacc_vwvwvuh(vx: HvxVector, vu: HvxVector, vv: HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyih))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmpyi_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vmpyi_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { 
vmpyih(vu, vv) } @@ -2692,7 +2705,7 @@ pub unsafe fn q6_vh_vmpyi_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyih_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmpyiacc_vhvhvh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vmpyiacc_VhVhVh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyih_acc(vx, vu, vv) } @@ -2704,7 +2717,7 @@ pub unsafe fn q6_vh_vmpyiacc_vhvhvh(vx: HvxVector, vu: HvxVector, vv: HvxVector) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyihb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmpyi_vhrb(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vmpyi_VhRb(vu: HvxVector, rt: i32) -> HvxVector { vmpyihb(vu, rt) } @@ -2716,7 +2729,7 @@ pub unsafe fn q6_vh_vmpyi_vhrb(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyihb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vmpyiacc_vhvhrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vmpyiacc_VhVhRb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vmpyihb_acc(vx, vu, rt) } @@ -2728,7 +2741,7 @@ pub unsafe fn q6_vh_vmpyiacc_vhvhrb(vx: HvxVector, vu: HvxVector, rt: i32) -> Hv #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiowh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyio_vwvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyio_VwVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyiowh(vu, vv) } @@ -2740,7 +2753,7 @@ pub unsafe fn q6_vw_vmpyio_vwvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = 
"hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiwb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyi_vwrb(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyi_VwRb(vu: HvxVector, rt: i32) -> HvxVector { vmpyiwb(vu, rt) } @@ -2752,7 +2765,7 @@ pub unsafe fn q6_vw_vmpyi_vwrb(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiwb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyiacc_vwvwrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyiacc_VwVwRb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vmpyiwb_acc(vx, vu, rt) } @@ -2764,7 +2777,7 @@ pub unsafe fn q6_vw_vmpyiacc_vwvwrb(vx: HvxVector, vu: HvxVector, rt: i32) -> Hv #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyi_vwrh(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyi_VwRh(vu: HvxVector, rt: i32) -> HvxVector { vmpyiwh(vu, rt) } @@ -2776,7 +2789,7 @@ pub unsafe fn q6_vw_vmpyi_vwrh(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiwh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyiacc_vwvwrh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyiacc_VwVwRh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vmpyiwh_acc(vx, vu, rt) } @@ -2788,7 +2801,7 @@ pub unsafe fn q6_vw_vmpyiacc_vwvwrh(vx: HvxVector, vu: HvxVector, rt: i32) -> Hv #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyowh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn 
q6_vw_vmpyo_vwvh_s1_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyo_VwVh_s1_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyowh(vu, vv) } @@ -2800,7 +2813,7 @@ pub unsafe fn q6_vw_vmpyo_vwvh_s1_sat(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyowh_rnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyo_vwvh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyo_VwVh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyowh_rnd(vu, vv) } @@ -2812,7 +2825,7 @@ pub unsafe fn q6_vw_vmpyo_vwvh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVe #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyowh_rnd_sacc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_rnd_sat_shift( +pub unsafe fn Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift( vx: HvxVector, vu: HvxVector, vv: HvxVector, @@ -2828,7 +2841,7 @@ pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_rnd_sat_shift( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyowh_sacc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_sat_shift( +pub unsafe fn Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift( vx: HvxVector, vu: HvxVector, vv: HvxVector, @@ -2844,7 +2857,7 @@ pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_sat_shift( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuh_vmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wuh_vmpy_VubRub(vu: HvxVector, rt: i32) -> HvxVectorPair { vmpyub(vu, rt) } @@ -2856,7 +2869,7 @@ pub unsafe fn q6_wuh_vmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVectorPair { 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyub_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuh_vmpyacc_wuhvubrub( +pub unsafe fn Q6_Wuh_vmpyacc_WuhVubRub( vxx: HvxVectorPair, vu: HvxVector, rt: i32, @@ -2872,7 +2885,7 @@ pub unsafe fn q6_wuh_vmpyacc_wuhvubrub( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyubv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuh_vmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wuh_vmpy_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpyubv(vu, vv) } @@ -2884,7 +2897,7 @@ pub unsafe fn q6_wuh_vmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyubv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuh_vmpyacc_wuhvubvub( +pub unsafe fn Q6_Wuh_vmpyacc_WuhVubVub( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -2900,7 +2913,7 @@ pub unsafe fn q6_wuh_vmpyacc_wuhvubvub( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vmpy_vuhruh(vu: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vmpy_VuhRuh(vu: HvxVector, rt: i32) -> HvxVectorPair { vmpyuh(vu, rt) } @@ -2912,7 +2925,7 @@ pub unsafe fn q6_wuw_vmpy_vuhruh(vu: HvxVector, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyuh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vmpyacc_wuwvuhruh( +pub unsafe fn Q6_Wuw_vmpyacc_WuwVuhRuh( vxx: HvxVectorPair, vu: HvxVector, rt: i32, @@ -2928,7 +2941,7 @@ pub unsafe fn q6_wuw_vmpyacc_wuwvuhruh( 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyuhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vmpy_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vmpy_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpyuhv(vu, vv) } @@ -2940,7 +2953,7 @@ pub unsafe fn q6_wuw_vmpy_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyuhv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vmpyacc_wuwvuhvuh( +pub unsafe fn Q6_Wuw_vmpyacc_WuwVuhVuh( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -2956,7 +2969,7 @@ pub unsafe fn q6_wuw_vmpyacc_wuwvuhvuh( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnavgh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vnavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vnavg_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vnavgh(vu, vv) } @@ -2968,7 +2981,7 @@ pub unsafe fn q6_vh_vnavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnavgub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vnavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vnavg_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { vnavgub(vu, vv) } @@ -2980,7 +2993,7 @@ pub unsafe fn q6_vb_vnavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnavgw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vnavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vnavg_VwVw(vu: 
HvxVector, vv: HvxVector) -> HvxVector { vnavgw(vu, vv) } @@ -2992,7 +3005,7 @@ pub unsafe fn q6_vw_vnavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnormamth))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vnormamt_vh(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vnormamt_Vh(vu: HvxVector) -> HvxVector { vnormamth(vu) } @@ -3004,7 +3017,7 @@ pub unsafe fn q6_vh_vnormamt_vh(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnormamtw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vnormamt_vw(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vnormamt_Vw(vu: HvxVector) -> HvxVector { vnormamtw(vu) } @@ -3016,7 +3029,7 @@ pub unsafe fn q6_vw_vnormamt_vw(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnot))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vnot_v(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vnot_V(vu: HvxVector) -> HvxVector { vnot(vu) } @@ -3028,7 +3041,7 @@ pub unsafe fn q6_v_vnot_v(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vor))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vor_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vor_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { simd_or(vu, vv) } @@ -3040,7 +3053,7 @@ pub unsafe fn q6_v_vor_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackeb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vpacke_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector 
{ +pub unsafe fn Q6_Vb_vpacke_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vpackeb(vu, vv) } @@ -3052,7 +3065,7 @@ pub unsafe fn q6_vb_vpacke_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackeh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vpacke_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vpacke_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vpackeh(vu, vv) } @@ -3064,7 +3077,7 @@ pub unsafe fn q6_vh_vpacke_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackhb_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vpack_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vpackhb_sat(vu, vv) } @@ -3076,7 +3089,7 @@ pub unsafe fn q6_vb_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackhub_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vpack_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vpackhub_sat(vu, vv) } @@ -3088,7 +3101,7 @@ pub unsafe fn q6_vub_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackob))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vpacko_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vpacko_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vpackob(vu, vv) } @@ -3100,7 +3113,7 @@ pub unsafe fn q6_vb_vpacko_vhvh(vu: HvxVector, vv: 
HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackoh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vpacko_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vpacko_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vpackoh(vu, vv) } @@ -3112,7 +3125,7 @@ pub unsafe fn q6_vh_vpacko_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackwh_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vpack_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vpackwh_sat(vu, vv) } @@ -3124,7 +3137,7 @@ pub unsafe fn q6_vh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackwuh_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vpack_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vpackwuh_sat(vu, vv) } @@ -3136,7 +3149,7 @@ pub unsafe fn q6_vuh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpopcounth))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vpopcount_vh(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vpopcount_Vh(vu: HvxVector) -> HvxVector { vpopcounth(vu) } @@ -3148,7 +3161,7 @@ pub unsafe fn q6_vh_vpopcount_vh(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrdelta))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn 
q6_v_vrdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vrdelta_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { vrdelta(vu, vv) } @@ -3160,7 +3173,7 @@ pub unsafe fn q6_v_vrdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vrmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vrmpy_VubRb(vu: HvxVector, rt: i32) -> HvxVector { vrmpybus(vu, rt) } @@ -3172,7 +3185,7 @@ pub unsafe fn q6_vw_vrmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vrmpyacc_vwvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vrmpyacc_VwVubRb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vrmpybus_acc(vx, vu, rt) } @@ -3184,7 +3197,7 @@ pub unsafe fn q6_vw_vrmpyacc_vwvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> H #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybusi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vrmpy_wubrbi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vrmpy_WubRbI(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { vrmpybusi(vuu, rt, iu1) } @@ -3196,7 +3209,7 @@ pub unsafe fn q6_ww_vrmpy_wubrbi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVe #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybusi_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vrmpyacc_wwwubrbi( +pub unsafe fn Q6_Ww_vrmpyacc_WwWubRbI( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -3213,7 +3226,7 @@ pub unsafe 
fn q6_ww_vrmpyacc_wwwubrbi( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybusv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vrmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vrmpy_VubVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vrmpybusv(vu, vv) } @@ -3225,7 +3238,7 @@ pub unsafe fn q6_vw_vrmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybusv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vrmpyacc_vwvubvb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vrmpyacc_VwVubVb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { vrmpybusv_acc(vx, vu, vv) } @@ -3237,7 +3250,7 @@ pub unsafe fn q6_vw_vrmpyacc_vwvubvb(vx: HvxVector, vu: HvxVector, vv: HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vrmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vrmpy_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vrmpybv(vu, vv) } @@ -3249,7 +3262,7 @@ pub unsafe fn q6_vw_vrmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vrmpyacc_vwvbvb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vrmpyacc_VwVbVb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { vrmpybv_acc(vx, vu, vv) } @@ -3261,7 +3274,7 @@ pub unsafe fn q6_vw_vrmpyacc_vwvbvb(vx: HvxVector, vu: HvxVector, vv: HvxVector) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] 
#[cfg_attr(test, assert_instr(vrmpyub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vrmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuw_vrmpy_VubRub(vu: HvxVector, rt: i32) -> HvxVector { vrmpyub(vu, rt) } @@ -3273,7 +3286,7 @@ pub unsafe fn q6_vuw_vrmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyub_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vrmpyacc_vuwvubrub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuw_vrmpyacc_VuwVubRub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vrmpyub_acc(vx, vu, rt) } @@ -3285,7 +3298,7 @@ pub unsafe fn q6_vuw_vrmpyacc_vuwvubrub(vx: HvxVector, vu: HvxVector, rt: i32) - #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyubi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vrmpy_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vrmpy_WubRubI(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { vrmpyubi(vuu, rt, iu1) } @@ -3297,7 +3310,7 @@ pub unsafe fn q6_wuw_vrmpy_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyubi_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vrmpyacc_wuwwubrubi( +pub unsafe fn Q6_Wuw_vrmpyacc_WuwWubRubI( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -3314,7 +3327,7 @@ pub unsafe fn q6_wuw_vrmpyacc_wuwwubrubi( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyubv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vrmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn 
Q6_Vuw_vrmpy_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { vrmpyubv(vu, vv) } @@ -3326,7 +3339,7 @@ pub unsafe fn q6_vuw_vrmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyubv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vrmpyacc_vuwvubvub(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuw_vrmpyacc_VuwVubVub(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { vrmpyubv_acc(vx, vu, vv) } @@ -3338,7 +3351,7 @@ pub unsafe fn q6_vuw_vrmpyacc_vuwvubvub(vx: HvxVector, vu: HvxVector, vv: HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vror))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vror_vr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vror_VR(vu: HvxVector, rt: i32) -> HvxVector { vror(vu, rt) } @@ -3350,7 +3363,7 @@ pub unsafe fn q6_v_vror_vr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vroundhb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vround_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vround_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vroundhb(vu, vv) } @@ -3362,7 +3375,7 @@ pub unsafe fn q6_vb_vround_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vroundhub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vround_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vround_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vroundhub(vu, vv) } @@ -3374,7 +3387,7 @@ pub unsafe fn q6_vub_vround_vhvh_sat(vu: HvxVector, vv: 
HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vroundwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vround_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vroundwh(vu, vv) } @@ -3386,7 +3399,7 @@ pub unsafe fn q6_vh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vroundwuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vround_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vroundwuh(vu, vv) } @@ -3398,7 +3411,7 @@ pub unsafe fn q6_vuh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrsadubi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vrsad_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vrsad_WubRubI(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { vrsadubi(vuu, rt, iu1) } @@ -3410,7 +3423,7 @@ pub unsafe fn q6_wuw_vrsad_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrsadubi_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vrsadacc_wuwwubrubi( +pub unsafe fn Q6_Wuw_vrsadacc_WuwWubRubI( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -3427,7 +3440,7 @@ pub unsafe fn q6_wuw_vrsadacc_wuwwubrubi( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsathub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn 
q6_vub_vsat_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vsat_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vsathub(vu, vv) } @@ -3439,7 +3452,7 @@ pub unsafe fn q6_vub_vsat_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsatwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vsat_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vsat_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vsatwh(vu, vv) } @@ -3451,7 +3464,7 @@ pub unsafe fn q6_vh_vsat_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vsxt_vb(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vsxt_Vb(vu: HvxVector) -> HvxVectorPair { vsb(vu) } @@ -3463,7 +3476,7 @@ pub unsafe fn q6_wh_vsxt_vb(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vsxt_vh(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vsxt_Vh(vu: HvxVector) -> HvxVectorPair { vsh(vu) } @@ -3475,7 +3488,7 @@ pub unsafe fn q6_ww_vsxt_vh(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshufeh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vshuffe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vshuffe_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vshufeh(vu, vv) } @@ -3487,7 +3500,7 @@ pub unsafe fn q6_vh_vshuffe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] 
#[cfg_attr(test, assert_instr(vshuffb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vshuff_vb(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vshuff_Vb(vu: HvxVector) -> HvxVector { vshuffb(vu) } @@ -3499,7 +3512,7 @@ pub unsafe fn q6_vb_vshuff_vb(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffeb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vshuffe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vshuffe_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vshuffeb(vu, vv) } @@ -3511,7 +3524,7 @@ pub unsafe fn q6_vb_vshuffe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vshuff_vh(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vshuff_Vh(vu: HvxVector) -> HvxVector { vshuffh(vu) } @@ -3523,7 +3536,7 @@ pub unsafe fn q6_vh_vshuff_vh(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffob))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vshuffo_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vshuffo_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vshuffob(vu, vv) } @@ -3535,7 +3548,7 @@ pub unsafe fn q6_vb_vshuffo_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffvdd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_w_vshuff_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_W_vshuff_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { vshuffvdd(vu, vv, rt) } @@ -3547,7 
+3560,7 @@ pub unsafe fn q6_w_vshuff_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshufoeb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wb_vshuffoe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wb_vshuffoe_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vshufoeb(vu, vv) } @@ -3559,7 +3572,7 @@ pub unsafe fn q6_wb_vshuffoe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshufoeh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vshuffoe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vshuffoe_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vshufoeh(vu, vv) } @@ -3571,7 +3584,7 @@ pub unsafe fn q6_wh_vshuffoe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshufoh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vshuffo_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vshuffo_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vshufoh(vu, vv) } @@ -3583,7 +3596,7 @@ pub unsafe fn q6_vh_vshuffo_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vsub_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vsub_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vsubb(vu, vv) } @@ -3595,7 +3608,7 @@ pub unsafe fn q6_vb_vsub_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, 
assert_instr(vsubb_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wb_vsub_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wb_vsub_WbWb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vsubb_dv(vuu, vvv) } @@ -3607,7 +3620,7 @@ pub unsafe fn q6_wb_vsub_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vsub_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vsubh(vu, vv) } @@ -3619,7 +3632,7 @@ pub unsafe fn q6_vh_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubh_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vsub_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vsub_WhWh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vsubh_dv(vuu, vvv) } @@ -3631,7 +3644,7 @@ pub unsafe fn q6_wh_vsub_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vsub_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vsub_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vsubhsat(vu, vv) } @@ -3643,7 +3656,7 @@ pub unsafe fn q6_vh_vsub_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubhsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vsub_whwh_sat(vuu: HvxVectorPair, vvv: 
HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vsub_WhWh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vsubhsat_dv(vuu, vvv) } @@ -3655,7 +3668,7 @@ pub unsafe fn q6_wh_vsub_whwh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vsub_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vsubhw(vu, vv) } @@ -3667,7 +3680,7 @@ pub unsafe fn q6_ww_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsububh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vsub_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vsub_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vsububh(vu, vv) } @@ -3679,7 +3692,7 @@ pub unsafe fn q6_wh_vsub_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsububsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vsub_vubvub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vsub_VubVub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vsububsat(vu, vv) } @@ -3691,7 +3704,7 @@ pub unsafe fn q6_vub_vsub_vubvub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsububsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wub_vsub_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wub_vsub_WubWub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { 
vsububsat_dv(vuu, vvv) } @@ -3703,7 +3716,7 @@ pub unsafe fn q6_wub_vsub_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubuhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vsub_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vsub_VuhVuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vsubuhsat(vu, vv) } @@ -3715,7 +3728,7 @@ pub unsafe fn q6_vuh_vsub_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubuhsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuh_vsub_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wuh_vsub_WuhWuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vsubuhsat_dv(vuu, vvv) } @@ -3727,7 +3740,7 @@ pub unsafe fn q6_wuh_vsub_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubuhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vsub_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vsub_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vsubuhw(vu, vv) } @@ -3739,7 +3752,7 @@ pub unsafe fn q6_ww_vsub_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vsub_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vsub_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { simd_sub(vu, vv) } @@ -3751,7 +3764,7 @@ pub unsafe fn q6_vw_vsub_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubw_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vsub_wwww(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vsub_WwWw(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vsubw_dv(vuu, vvv) } @@ -3763,7 +3776,7 @@ pub unsafe fn q6_ww_vsub_wwww(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubwsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vsub_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vsub_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vsubwsat(vu, vv) } @@ -3775,7 +3788,7 @@ pub unsafe fn q6_vw_vsub_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubwsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vsub_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vsub_WwWw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vsubwsat_dv(vuu, vvv) } @@ -3787,7 +3800,7 @@ pub unsafe fn q6_ww_vsub_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpyb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vtmpy_wbrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vtmpy_WbRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vtmpyb(vuu, rt) } @@ -3799,7 +3812,7 @@ pub unsafe fn q6_wh_vtmpy_wbrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpyb_acc))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vtmpyacc_whwbrb( +pub unsafe fn Q6_Wh_vtmpyacc_WhWbRb( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -3815,7 +3828,7 @@ pub unsafe fn q6_wh_vtmpyacc_whwbrb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpybus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vtmpy_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vtmpy_WubRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vtmpybus(vuu, rt) } @@ -3827,7 +3840,7 @@ pub unsafe fn q6_wh_vtmpy_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpybus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vtmpyacc_whwubrb( +pub unsafe fn Q6_Wh_vtmpyacc_WhWubRb( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -3843,7 +3856,7 @@ pub unsafe fn q6_wh_vtmpyacc_whwubrb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpyhb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vtmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vtmpy_WhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vtmpyhb(vuu, rt) } @@ -3855,7 +3868,7 @@ pub unsafe fn q6_ww_vtmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpyhb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vtmpyacc_wwwhrb( +pub unsafe fn Q6_Ww_vtmpyacc_WwWhRb( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -3871,7 +3884,7 @@ pub unsafe fn q6_ww_vtmpyacc_wwwhrb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackb))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vunpack_vb(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vunpack_Vb(vu: HvxVector) -> HvxVectorPair { vunpackb(vu) } @@ -3883,7 +3896,7 @@ pub unsafe fn q6_wh_vunpack_vb(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vunpack_vh(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vunpack_Vh(vu: HvxVector) -> HvxVectorPair { vunpackh(vu) } @@ -3895,7 +3908,7 @@ pub unsafe fn q6_ww_vunpack_vh(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackob))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vunpackoor_whvb(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vunpackoor_WhVb(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair { vunpackob(vxx, vu) } @@ -3907,7 +3920,7 @@ pub unsafe fn q6_wh_vunpackoor_whvb(vxx: HvxVectorPair, vu: HvxVector) -> HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackoh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vunpackoor_wwvh(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vunpackoor_WwVh(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair { vunpackoh(vxx, vu) } @@ -3919,7 +3932,7 @@ pub unsafe fn q6_ww_vunpackoor_wwvh(vxx: HvxVectorPair, vu: HvxVector) -> HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuh_vunpack_vub(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wuh_vunpack_Vub(vu: HvxVector) -> HvxVectorPair { vunpackub(vu) } @@ -3931,7 
+3944,7 @@ pub unsafe fn q6_wuh_vunpack_vub(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vunpack_vuh(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vunpack_Vuh(vu: HvxVector) -> HvxVectorPair { vunpackuh(vu) } @@ -3943,7 +3956,7 @@ pub unsafe fn q6_wuw_vunpack_vuh(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vxor))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vxor_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vxor_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { simd_xor(vu, vv) } @@ -3955,7 +3968,7 @@ pub unsafe fn q6_v_vxor_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vzb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuh_vzxt_vub(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wuh_vzxt_Vub(vu: HvxVector) -> HvxVectorPair { vzb(vu) } @@ -3967,7 +3980,7 @@ pub unsafe fn q6_wuh_vzxt_vub(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vzh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vzxt_vuh(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vzxt_Vuh(vu: HvxVector) -> HvxVectorPair { vzh(vu) } @@ -3979,7 +3992,7 @@ pub unsafe fn q6_wuw_vzxt_vuh(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(lvsplatb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vsplat_r(rt: i32) -> HvxVector { +pub unsafe fn Q6_Vb_vsplat_R(rt: i32) -> HvxVector { 
lvsplatb(rt) } @@ -3991,7 +4004,7 @@ pub unsafe fn q6_vb_vsplat_r(rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(lvsplath))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vsplat_r(rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vsplat_R(rt: i32) -> HvxVector { lvsplath(rt) } @@ -4003,7 +4016,7 @@ pub unsafe fn q6_vh_vsplat_r(rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddbsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vadd_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vadd_VbVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vaddbsat(vu, vv) } @@ -4015,7 +4028,7 @@ pub unsafe fn q6_vb_vadd_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddbsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wb_vadd_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wb_vadd_WbWb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vaddbsat_dv(vuu, vvv) } @@ -4027,7 +4040,7 @@ pub unsafe fn q6_wb_vadd_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddclbh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vadd_vclb_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vadd_vclb_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { vaddclbh(vu, vv) } @@ -4039,7 +4052,7 @@ pub unsafe fn q6_vh_vadd_vclb_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddclbw))] #[unstable(feature = 
"stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vadd_vclb_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vadd_vclb_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vaddclbw(vu, vv) } @@ -4051,7 +4064,7 @@ pub unsafe fn q6_vw_vadd_vclb_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddhw_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vaddacc_wwvhvh( +pub unsafe fn Q6_Ww_vaddacc_WwVhVh( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -4067,7 +4080,7 @@ pub unsafe fn q6_ww_vaddacc_wwvhvh( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddubh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vaddacc_whvubvub( +pub unsafe fn Q6_Wh_vaddacc_WhVubVub( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -4083,7 +4096,7 @@ pub unsafe fn q6_wh_vaddacc_whvubvub( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddububb_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vadd_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vadd_VubVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vaddububb_sat(vu, vv) } @@ -4095,7 +4108,7 @@ pub unsafe fn q6_vub_vadd_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vadduhw_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vaddacc_wwvuhvuh( +pub unsafe fn Q6_Ww_vaddacc_WwVuhVuh( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -4111,7 +4124,7 @@ pub unsafe fn q6_ww_vaddacc_wwvuhvuh( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vadduwsat))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vadd_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuw_vadd_VuwVuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vadduwsat(vu, vv) } @@ -4123,7 +4136,7 @@ pub unsafe fn q6_vuw_vadd_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vadduwsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vadd_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vadd_WuwWuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vadduwsat_dv(vuu, vvv) } @@ -4135,7 +4148,7 @@ pub unsafe fn q6_wuw_vadd_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vasrhbsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vb_vasr_VhVhR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrhbsat(vu, vv, rt) } @@ -4147,7 +4160,7 @@ pub unsafe fn q6_vb_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vasruwuhrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vasr_vuwvuwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuh_vasr_VuwVuwR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasruwuhrndsat(vu, vv, rt) } @@ -4159,7 +4172,7 @@ pub unsafe fn q6_vuh_vasr_vuwvuwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vasrwuhrndsat))] #[unstable(feature = "stdarch_hexagon", issue = 
"151523")] -pub unsafe fn q6_vuh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuh_vasr_VwVwR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasrwuhrndsat(vu, vv, rt) } @@ -4171,7 +4184,7 @@ pub unsafe fn q6_vuh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) - #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlsrb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vlsr_vubr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vub_vlsr_VubR(vu: HvxVector, rt: i32) -> HvxVector { vlsrb(vu, rt) } @@ -4183,7 +4196,7 @@ pub unsafe fn q6_vub_vlsr_vubr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvvb_nm))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vlut32_vbvbr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vb_vlut32_VbVbR_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vlutvvb_nm(vu, vv, rt) } @@ -4195,7 +4208,7 @@ pub unsafe fn q6_vb_vlut32_vbvbr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvvb_oracci))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vlut32or_vbvbvbi( +pub unsafe fn Q6_Vb_vlut32or_VbVbVbI( vx: HvxVector, vu: HvxVector, vv: HvxVector, @@ -4212,7 +4225,7 @@ pub unsafe fn q6_vb_vlut32or_vbvbvbi( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvvbi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vlut32_vbvbi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { +pub unsafe fn Q6_Vb_vlut32_VbVbI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { vlutvvbi(vu, vv, iu3) } @@ -4224,7 
+4237,7 @@ pub unsafe fn q6_vb_vlut32_vbvbi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxV #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvwh_nm))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vlut16_vbvhr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vlut16_VbVhR_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { vlutvwh_nm(vu, vv, rt) } @@ -4236,7 +4249,7 @@ pub unsafe fn q6_wh_vlut16_vbvhr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvwh_oracci))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vlut16or_whvbvhi( +pub unsafe fn Q6_Wh_vlut16or_WhVbVhI( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -4253,7 +4266,7 @@ pub unsafe fn q6_wh_vlut16or_whvbvhi( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvwhi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vlut16_vbvhi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vlut16_VbVhI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVectorPair { vlutvwhi(vu, vv, iu3) } @@ -4265,7 +4278,7 @@ pub unsafe fn q6_wh_vlut16_vbvhi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxV #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmaxb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vmax_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vmax_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vmaxb(vu, vv) } @@ -4277,7 +4290,7 @@ pub unsafe fn q6_vb_vmax_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vminb))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vmin_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vmin_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vminb(vu, vv) } @@ -4289,7 +4302,7 @@ pub unsafe fn q6_vb_vmin_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpauhb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpa_wuhrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vmpa_WuhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vmpauhb(vuu, rt) } @@ -4301,7 +4314,7 @@ pub unsafe fn q6_ww_vmpa_wuhrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpauhb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpaacc_wwwuhrb( +pub unsafe fn Q6_Ww_vmpaacc_WwWuhRb( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -4317,7 +4330,7 @@ pub unsafe fn q6_ww_vmpaacc_wwwuhrb( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpyewuh_64))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_w_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_W_vmpye_VwVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpyewuh_64(vu, vv) } @@ -4329,7 +4342,7 @@ pub unsafe fn q6_w_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpyiwub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyi_vwrub(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyi_VwRub(vu: HvxVector, rt: i32) -> HvxVector { vmpyiwub(vu, rt) } @@ -4341,7 +4354,7 @@ pub unsafe fn q6_vw_vmpyi_vwrub(vu: 
HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpyiwub_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vmpyiacc_vwvwrub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vw_vmpyiacc_VwVwRub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vmpyiwub_acc(vx, vu, rt) } @@ -4353,7 +4366,7 @@ pub unsafe fn q6_vw_vmpyiacc_vwvwrub(vx: HvxVector, vu: HvxVector, rt: i32) -> H #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpyowh_64_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_w_vmpyoacc_wvwvh( +pub unsafe fn Q6_W_vmpyoacc_WVwVh( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -4369,7 +4382,7 @@ pub unsafe fn q6_w_vmpyoacc_wvwvh( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vrounduhub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vround_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vround_VuhVuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vrounduhub(vu, vv) } @@ -4381,7 +4394,7 @@ pub unsafe fn q6_vub_vround_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vrounduwuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vround_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vround_VuwVuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vrounduwuh(vu, vv) } @@ -4393,7 +4406,7 @@ pub unsafe fn q6_vuh_vround_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsatuwuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub 
unsafe fn q6_vuh_vsat_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vsat_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVector { vsatuwuh(vu, vv) } @@ -4405,7 +4418,7 @@ pub unsafe fn q6_vuh_vsat_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubbsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vsub_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vsub_VbVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vsubbsat(vu, vv) } @@ -4417,7 +4430,7 @@ pub unsafe fn q6_vb_vsub_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubbsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wb_vsub_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wb_vsub_WbWb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vsubbsat_dv(vuu, vvv) } @@ -4429,7 +4442,7 @@ pub unsafe fn q6_wb_vsub_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubububb_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vsub_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vsub_VubVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { vsubububb_sat(vu, vv) } @@ -4441,7 +4454,7 @@ pub unsafe fn q6_vub_vsub_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubuwsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vsub_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuw_vsub_VuwVuw_sat(vu: HvxVector, vv: 
HvxVector) -> HvxVector { vsubuwsat(vu, vv) } @@ -4453,7 +4466,7 @@ pub unsafe fn q6_vuw_vsub_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubuwsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wuw_vsub_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { +pub unsafe fn Q6_Wuw_vsub_WuwWuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { vsubuwsat_dv(vuu, vvv) } @@ -4465,7 +4478,7 @@ pub unsafe fn q6_wuw_vsub_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vabsb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vabs_vb(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vabs_Vb(vu: HvxVector) -> HvxVector { vabsb(vu) } @@ -4477,7 +4490,7 @@ pub unsafe fn q6_vb_vabs_vb(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vabsb_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vabs_vb_sat(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vabs_Vb_sat(vu: HvxVector) -> HvxVector { vabsb_sat(vu) } @@ -4489,7 +4502,7 @@ pub unsafe fn q6_vb_vabs_vb_sat(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vaslh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vaslacc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vaslacc_VhVhR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vaslh_acc(vx, vu, rt) } @@ -4501,7 +4514,7 @@ pub unsafe fn q6_vh_vaslacc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxV #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, 
assert_instr(vasrh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vasracc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vh_vasracc_VhVhR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vasrh_acc(vx, vu, rt) } @@ -4513,7 +4526,7 @@ pub unsafe fn q6_vh_vasracc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxV #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vasruhubrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vasr_vuhvuhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vub_vasr_VuhVuhR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasruhubrndsat(vu, vv, rt) } @@ -4525,7 +4538,7 @@ pub unsafe fn q6_vub_vasr_vuhvuhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vasruhubsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vasr_vuhvuhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vub_vasr_VuhVuhR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasruhubsat(vu, vv, rt) } @@ -4537,7 +4550,7 @@ pub unsafe fn q6_vub_vasr_vuhvuhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vasruwuhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vasr_vuwvuwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuh_vasr_VuwVuwR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { vasruwuhsat(vu, vv, rt) } @@ -4549,7 +4562,7 @@ pub unsafe fn q6_vuh_vasr_vuwvuwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vavgb))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vavg_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vavgb(vu, vv) } @@ -4561,7 +4574,7 @@ pub unsafe fn q6_vb_vavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vavgbrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vavg_vbvb_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vavg_VbVb_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { vavgbrnd(vu, vv) } @@ -4573,7 +4586,7 @@ pub unsafe fn q6_vb_vavg_vbvb_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vavguw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vavg_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuw_vavg_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVector { vavguw(vu, vv) } @@ -4585,7 +4598,7 @@ pub unsafe fn q6_vuw_vavg_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vavguwrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vavg_vuwvuw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuw_vavg_VuwVuw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { vavguwrnd(vu, vv) } @@ -4597,7 +4610,7 @@ pub unsafe fn q6_vuw_vavg_vuwvuw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vdd0))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_w_vzero() -> HvxVectorPair { +pub unsafe fn Q6_W_vzero() -> HvxVectorPair { vdd0() } @@ -4609,7 +4622,7 @@ pub unsafe fn 
q6_w_vzero() -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vgathermh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vgather_armvh(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) { +pub unsafe fn Q6_vgather_ARMVh(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) { vgathermh(rs, rt, mu, vv) } @@ -4621,7 +4634,7 @@ pub unsafe fn q6_vgather_armvh(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vgathermhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vgather_armww(rs: *mut HvxVector, rt: i32, mu: i32, vvv: HvxVectorPair) { +pub unsafe fn Q6_vgather_ARMWw(rs: *mut HvxVector, rt: i32, mu: i32, vvv: HvxVectorPair) { vgathermhw(rs, rt, mu, vvv) } @@ -4633,7 +4646,7 @@ pub unsafe fn q6_vgather_armww(rs: *mut HvxVector, rt: i32, mu: i32, vvv: HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vgathermw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vgather_armvw(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) { +pub unsafe fn Q6_vgather_ARMVw(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) { vgathermw(rs, rt, mu, vv) } @@ -4645,7 +4658,7 @@ pub unsafe fn q6_vgather_armvw(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpabuu))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpa_wubrub(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Wh_vmpa_WubRub(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { vmpabuu(vuu, rt) } @@ -4657,7 +4670,7 @@ pub unsafe fn q6_wh_vmpa_wubrub(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = 
"hvxv65"))] #[cfg_attr(test, assert_instr(vmpabuu_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wh_vmpaacc_whwubrub( +pub unsafe fn Q6_Wh_vmpaacc_WhWubRub( vxx: HvxVectorPair, vuu: HvxVectorPair, rt: i32, @@ -4673,7 +4686,7 @@ pub unsafe fn q6_wh_vmpaacc_whwubrub( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpyh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vmpyacc_wwvhrh(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair { +pub unsafe fn Q6_Ww_vmpyacc_WwVhRh(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair { vmpyh_acc(vxx, vu, rt) } @@ -4685,7 +4698,7 @@ pub unsafe fn q6_ww_vmpyacc_wwvhrh(vxx: HvxVectorPair, vu: HvxVector, rt: i32) - #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpyuhe))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vmpye_vuhruh(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuw_vmpye_VuhRuh(vu: HvxVector, rt: i32) -> HvxVector { vmpyuhe(vu, rt) } @@ -4697,7 +4710,7 @@ pub unsafe fn q6_vuw_vmpye_vuhruh(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpyuhe_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vmpyeacc_vuwvuhruh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_Vuw_vmpyeacc_VuwVuhRuh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { vmpyuhe_acc(vx, vu, rt) } @@ -4709,7 +4722,7 @@ pub unsafe fn q6_vuw_vmpyeacc_vuwvuhruh(vx: HvxVector, vu: HvxVector, rt: i32) - #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vnavgb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vnavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn 
Q6_Vb_vnavg_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { vnavgb(vu, vv) } @@ -4721,7 +4734,7 @@ pub unsafe fn q6_vb_vnavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vscatter_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { +pub unsafe fn Q6_vscatter_RMVhV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { vscattermh(rt, mu, vv, vw) } @@ -4733,7 +4746,7 @@ pub unsafe fn q6_vscatter_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermh_add))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vscatteracc_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { +pub unsafe fn Q6_vscatteracc_RMVhV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { vscattermh_add(rt, mu, vv, vw) } @@ -4745,7 +4758,7 @@ pub unsafe fn q6_vscatteracc_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vscatter_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) { +pub unsafe fn Q6_vscatter_RMWwV(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) { vscattermhw(rt, mu, vvv, vw) } @@ -4757,7 +4770,7 @@ pub unsafe fn q6_vscatter_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermhw_add))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vscatteracc_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) { +pub unsafe fn Q6_vscatteracc_RMWwV(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) { 
vscattermhw_add(rt, mu, vvv, vw) } @@ -4769,7 +4782,7 @@ pub unsafe fn q6_vscatteracc_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: Hvx #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vscatter_rmvwv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { +pub unsafe fn Q6_vscatter_RMVwV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { vscattermw(rt, mu, vv, vw) } @@ -4781,7 +4794,7 @@ pub unsafe fn q6_vscatter_rmvwv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermw_add))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vscatteracc_rmvwv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { +pub unsafe fn Q6_vscatteracc_RMVwV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { vscattermw_add(rt, mu, vv, vw) } @@ -4793,7 +4806,7 @@ pub unsafe fn q6_vscatteracc_rmvwv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] #[cfg_attr(test, assert_instr(vasr_into))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_vasrinto_wwvwvw( +pub unsafe fn Q6_Ww_vasrinto_WwVwVw( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -4809,7 +4822,7 @@ pub unsafe fn q6_ww_vasrinto_wwvwvw( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] #[cfg_attr(test, assert_instr(vrotr))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuw_vrotr_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuw_vrotr_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVector { vrotr(vu, vv) } @@ -4821,7 +4834,7 @@ pub unsafe fn q6_vuw_vrotr_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] #[cfg_attr(test, 
assert_instr(vsatdw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vsatdw_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vsatdw_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { vsatdw(vu, vv) } @@ -4833,7 +4846,7 @@ pub unsafe fn q6_vw_vsatdw_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(v6mpyhubs10))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_v6mpy_wubwbi_h( +pub unsafe fn Q6_Ww_v6mpy_WubWbI_h( vuu: HvxVectorPair, vvv: HvxVectorPair, iu2: i32, @@ -4849,7 +4862,7 @@ pub unsafe fn q6_ww_v6mpy_wubwbi_h( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(v6mpyhubs10_vxx))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_v6mpyacc_wwwubwbi_h( +pub unsafe fn Q6_Ww_v6mpyacc_WwWubWbI_h( vxx: HvxVectorPair, vuu: HvxVectorPair, vvv: HvxVectorPair, @@ -4866,7 +4879,7 @@ pub unsafe fn q6_ww_v6mpyacc_wwwubwbi_h( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(v6mpyvubs10))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_v6mpy_wubwbi_v( +pub unsafe fn Q6_Ww_v6mpy_WubWbI_v( vuu: HvxVectorPair, vvv: HvxVectorPair, iu2: i32, @@ -4882,7 +4895,7 @@ pub unsafe fn q6_ww_v6mpy_wubwbi_v( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(v6mpyvubs10_vxx))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_ww_v6mpyacc_wwwubwbi_v( +pub unsafe fn Q6_Ww_v6mpyacc_WwWubWbI_v( vxx: HvxVectorPair, vuu: HvxVectorPair, vvv: HvxVectorPair, @@ -4899,7 +4912,7 @@ pub unsafe fn q6_ww_v6mpyacc_wwwubwbi_v( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vabs_hf))] #[unstable(feature = "stdarch_hexagon", 
issue = "151523")] -pub unsafe fn q6_vhf_vabs_vhf(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vabs_Vhf(vu: HvxVector) -> HvxVector { vabs_hf(vu) } @@ -4911,7 +4924,7 @@ pub unsafe fn q6_vhf_vabs_vhf(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vabs_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vabs_vsf(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vabs_Vsf(vu: HvxVector) -> HvxVector { vabs_sf(vu) } @@ -4923,7 +4936,7 @@ pub unsafe fn q6_vsf_vabs_vsf(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf16_vadd_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vadd_hf(vu, vv) } @@ -4935,7 +4948,7 @@ pub unsafe fn q6_vqf16_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_hf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vadd_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vadd_hf_hf(vu, vv) } @@ -4947,7 +4960,7 @@ pub unsafe fn q6_vhf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vadd_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf16_vadd_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { vadd_qf16(vu, vv) } @@ -4959,7 +4972,7 @@ pub unsafe fn q6_vqf16_vadd_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> 
HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_qf16_mix))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vadd_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf16_vadd_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vadd_qf16_mix(vu, vv) } @@ -4971,7 +4984,7 @@ pub unsafe fn q6_vqf16_vadd_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf32_vadd_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf32_vadd_Vqf32Vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { vadd_qf32(vu, vv) } @@ -4983,7 +4996,7 @@ pub unsafe fn q6_vqf32_vadd_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_qf32_mix))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf32_vadd_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf32_vadd_Vqf32Vsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vadd_qf32_mix(vu, vv) } @@ -4995,7 +5008,7 @@ pub unsafe fn q6_vqf32_vadd_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf32_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf32_vadd_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vadd_sf(vu, vv) } @@ -5007,7 +5020,7 @@ pub unsafe fn q6_vqf32_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_sf_hf))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wsf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wsf_vadd_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vadd_sf_hf(vu, vv) } @@ -5019,7 +5032,7 @@ pub unsafe fn q6_wsf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_sf_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vadd_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vadd_sf_sf(vu, vv) } @@ -5031,7 +5044,7 @@ pub unsafe fn q6_vsf_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vassign_fp))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vfmv_vw(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_vfmv_Vw(vu: HvxVector) -> HvxVector { vassign_fp(vu) } @@ -5043,7 +5056,7 @@ pub unsafe fn q6_vw_vfmv_vw(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vconv_hf_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_equals_vqf16(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_equals_Vqf16(vu: HvxVector) -> HvxVector { vconv_hf_qf16(vu) } @@ -5055,7 +5068,7 @@ pub unsafe fn q6_vhf_equals_vqf16(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vconv_hf_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_equals_wqf32(vuu: HvxVectorPair) -> HvxVector { +pub unsafe fn Q6_Vhf_equals_Wqf32(vuu: HvxVectorPair) -> HvxVector { vconv_hf_qf32(vuu) } @@ -5067,7 +5080,7 @@ pub unsafe fn 
q6_vhf_equals_wqf32(vuu: HvxVectorPair) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vconv_sf_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_equals_vqf32(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_equals_Vqf32(vu: HvxVector) -> HvxVector { vconv_sf_qf32(vu) } @@ -5079,7 +5092,7 @@ pub unsafe fn q6_vsf_equals_vqf32(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_b_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_vcvt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_vcvt_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vcvt_b_hf(vu, vv) } @@ -5091,7 +5104,7 @@ pub unsafe fn q6_vb_vcvt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_h_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_vcvt_vhf(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_vcvt_Vhf(vu: HvxVector) -> HvxVector { vcvt_h_hf(vu) } @@ -5103,7 +5116,7 @@ pub unsafe fn q6_vh_vcvt_vhf(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_b))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_whf_vcvt_vb(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Whf_vcvt_Vb(vu: HvxVector) -> HvxVectorPair { vcvt_hf_b(vu) } @@ -5115,7 +5128,7 @@ pub unsafe fn q6_whf_vcvt_vb(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_h))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vcvt_vh(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vcvt_Vh(vu: HvxVector) -> 
HvxVector { vcvt_hf_h(vu) } @@ -5127,7 +5140,7 @@ pub unsafe fn q6_vhf_vcvt_vh(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vcvt_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vcvt_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vcvt_hf_sf(vu, vv) } @@ -5139,7 +5152,7 @@ pub unsafe fn q6_vhf_vcvt_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_ub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_whf_vcvt_vub(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Whf_vcvt_Vub(vu: HvxVector) -> HvxVectorPair { vcvt_hf_ub(vu) } @@ -5151,7 +5164,7 @@ pub unsafe fn q6_whf_vcvt_vub(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_uh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vcvt_vuh(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vcvt_Vuh(vu: HvxVector) -> HvxVector { vcvt_hf_uh(vu) } @@ -5163,7 +5176,7 @@ pub unsafe fn q6_vhf_vcvt_vuh(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wsf_vcvt_vhf(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wsf_vcvt_Vhf(vu: HvxVector) -> HvxVectorPair { vcvt_sf_hf(vu) } @@ -5175,7 +5188,7 @@ pub unsafe fn q6_wsf_vcvt_vhf(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_ub_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vcvt_vhfvhf(vu: HvxVector, 
vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vcvt_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vcvt_ub_hf(vu, vv) } @@ -5187,7 +5200,7 @@ pub unsafe fn q6_vub_vcvt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_uh_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vcvt_vhf(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vcvt_Vhf(vu: HvxVector) -> HvxVector { vcvt_uh_hf(vu) } @@ -5199,7 +5212,7 @@ pub unsafe fn q6_vuh_vcvt_vhf(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vdmpy_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vdmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vdmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vdmpy_sf_hf(vu, vv) } @@ -5211,7 +5224,7 @@ pub unsafe fn q6_vsf_vdmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vdmpy_sf_hf_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vdmpyacc_vsfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vdmpyacc_VsfVhfVhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { vdmpy_sf_hf_acc(vx, vu, vv) } @@ -5223,7 +5236,7 @@ pub unsafe fn q6_vsf_vdmpyacc_vsfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfmax_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vfmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vfmax_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vfmax_hf(vu, vv) } @@ -5235,7 +5248,7 @@ pub unsafe fn 
q6_vhf_vfmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfmax_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vfmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vfmax_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vfmax_sf(vu, vv) } @@ -5247,7 +5260,7 @@ pub unsafe fn q6_vsf_vfmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfmin_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vfmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vfmin_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vfmin_hf(vu, vv) } @@ -5259,7 +5272,7 @@ pub unsafe fn q6_vhf_vfmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfmin_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vfmin_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vfmin_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vfmin_sf(vu, vv) } @@ -5271,7 +5284,7 @@ pub unsafe fn q6_vsf_vfmin_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfneg_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vfneg_vhf(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vfneg_Vhf(vu: HvxVector) -> HvxVector { vfneg_hf(vu) } @@ -5283,7 +5296,7 @@ pub unsafe fn q6_vhf_vfneg_vhf(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfneg_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub 
unsafe fn q6_vsf_vfneg_vsf(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vfneg_Vsf(vu: HvxVector) -> HvxVector { vfneg_sf(vu) } @@ -5295,7 +5308,7 @@ pub unsafe fn q6_vsf_vfneg_vsf(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmax_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vmax_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vmax_hf(vu, vv) } @@ -5307,7 +5320,7 @@ pub unsafe fn q6_vhf_vmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmax_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vmax_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vmax_sf(vu, vv) } @@ -5319,7 +5332,7 @@ pub unsafe fn q6_vsf_vmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmin_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vmin_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vmin_hf(vu, vv) } @@ -5331,7 +5344,7 @@ pub unsafe fn q6_vhf_vmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmin_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vmin_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vmin_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vmin_sf(vu, vv) } @@ -5343,7 +5356,7 @@ pub unsafe fn q6_vsf_vmin_vsfvsf(vu: HvxVector, vv: HvxVector) -> 
HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_hf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpy_hf_hf(vu, vv) } @@ -5355,7 +5368,7 @@ pub unsafe fn q6_vhf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_hf_hf_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vmpyacc_vhfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vmpyacc_VhfVhfVhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { vmpy_hf_hf_acc(vx, vu, vv) } @@ -5367,7 +5380,7 @@ pub unsafe fn q6_vhf_vmpyacc_vhfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVect #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf16_vmpy_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpy_qf16(vu, vv) } @@ -5379,7 +5392,7 @@ pub unsafe fn q6_vqf16_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf16_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf16_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpy_qf16_hf(vu, vv) } @@ -5391,7 +5404,7 @@ pub unsafe fn q6_vqf16_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, 
assert_instr(vmpy_qf16_mix_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vmpy_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf16_vmpy_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpy_qf16_mix_hf(vu, vv) } @@ -5403,7 +5416,7 @@ pub unsafe fn q6_vqf16_vmpy_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf32_vmpy_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf32_vmpy_Vqf32Vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpy_qf32(vu, vv) } @@ -5415,7 +5428,7 @@ pub unsafe fn q6_vqf32_vmpy_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wqf32_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wqf32_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpy_qf32_hf(vu, vv) } @@ -5427,7 +5440,7 @@ pub unsafe fn q6_wqf32_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPai #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32_mix_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wqf32_vmpy_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wqf32_vmpy_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpy_qf32_mix_hf(vu, vv) } @@ -5439,7 +5452,7 @@ pub unsafe fn q6_wqf32_vmpy_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVectorP #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub 
unsafe fn q6_wqf32_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wqf32_vmpy_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpy_qf32_qf16(vu, vv) } @@ -5451,7 +5464,7 @@ pub unsafe fn q6_wqf32_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf32_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf32_vmpy_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpy_qf32_sf(vu, vv) } @@ -5463,7 +5476,7 @@ pub unsafe fn q6_vqf32_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wsf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wsf_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vmpy_sf_hf(vu, vv) } @@ -5475,7 +5488,7 @@ pub unsafe fn q6_wsf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_sf_hf_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wsf_vmpyacc_wsfvhfvhf( +pub unsafe fn Q6_Wsf_vmpyacc_WsfVhfVhf( vxx: HvxVectorPair, vu: HvxVector, vv: HvxVector, @@ -5491,7 +5504,7 @@ pub unsafe fn q6_wsf_vmpyacc_wsfvhfvhf( #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_sf_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vmpy_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpy_sf_sf(vu, vv) } @@ -5503,7 +5516,7 @@ pub unsafe fn 
q6_vsf_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf16_vsub_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vsub_hf(vu, vv) } @@ -5515,7 +5528,7 @@ pub unsafe fn q6_vqf16_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_hf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_vsub_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vsub_hf_hf(vu, vv) } @@ -5527,7 +5540,7 @@ pub unsafe fn q6_vhf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vsub_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf16_vsub_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { vsub_qf16(vu, vv) } @@ -5539,7 +5552,7 @@ pub unsafe fn q6_vqf16_vsub_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_qf16_mix))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf16_vsub_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf16_vsub_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { vsub_qf16_mix(vu, vv) } @@ -5551,7 +5564,7 @@ pub unsafe fn q6_vqf16_vsub_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, 
assert_instr(vsub_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf32_vsub_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf32_vsub_Vqf32Vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { vsub_qf32(vu, vv) } @@ -5563,7 +5576,7 @@ pub unsafe fn q6_vqf32_vsub_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVecto #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_qf32_mix))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf32_vsub_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf32_vsub_Vqf32Vsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vsub_qf32_mix(vu, vv) } @@ -5575,7 +5588,7 @@ pub unsafe fn q6_vqf32_vsub_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vqf32_vsub_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vqf32_vsub_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vsub_sf(vu, vv) } @@ -5587,7 +5600,7 @@ pub unsafe fn q6_vqf32_vsub_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_wsf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Wsf_vsub_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vsub_sf_hf(vu, vv) } @@ -5599,7 +5612,7 @@ pub unsafe fn q6_wsf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_sf_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_vsub_vsfvsf(vu: HvxVector, vv: 
HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_vsub_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { vsub_sf_sf(vu, vv) } @@ -5611,7 +5624,7 @@ pub unsafe fn q6_vsf_vsub_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vasrvuhubrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vasr_wuhvub_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vasr_WuhVub_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { vasrvuhubrndsat(vuu, vv) } @@ -5623,7 +5636,7 @@ pub unsafe fn q6_vub_vasr_wuhvub_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> H #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vasrvuhubsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vub_vasr_wuhvub_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vub_vasr_WuhVub_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { vasrvuhubsat(vuu, vv) } @@ -5635,7 +5648,7 @@ pub unsafe fn q6_vub_vasr_wuhvub_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVe #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vasrvwuhrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vasr_wwvuh_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vasr_WwVuh_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { vasrvwuhrndsat(vuu, vv) } @@ -5647,7 +5660,7 @@ pub unsafe fn q6_vuh_vasr_wwvuh_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> Hv #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vasrvwuhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vasr_wwvuh_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vasr_WwVuh_sat(vuu: 
HvxVectorPair, vv: HvxVector) -> HvxVector { vasrvwuhsat(vuu, vv) } @@ -5659,7 +5672,7 @@ pub unsafe fn q6_vuh_vasr_wwvuh_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVec #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vmpyuhvs))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vuh_vmpy_vuhvuh_rs16(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vuh_vmpy_VuhVuh_rs16(vu: HvxVector, vv: HvxVector) -> HvxVector { vmpyuhvs(vu, vv) } @@ -5671,7 +5684,7 @@ pub unsafe fn q6_vuh_vmpy_vuhvuh_rs16(vu: HvxVector, vv: HvxVector) -> HvxVector #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] #[cfg_attr(test, assert_instr(vconv_h_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_equals_vhf(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_equals_Vhf(vu: HvxVector) -> HvxVector { vconv_h_hf(vu) } @@ -5683,7 +5696,7 @@ pub unsafe fn q6_vh_equals_vhf(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] #[cfg_attr(test, assert_instr(vconv_hf_h))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vhf_equals_vh(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vhf_equals_Vh(vu: HvxVector) -> HvxVector { vconv_hf_h(vu) } @@ -5695,7 +5708,7 @@ pub unsafe fn q6_vhf_equals_vh(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] #[cfg_attr(test, assert_instr(vconv_sf_w))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vsf_equals_vw(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vsf_equals_Vw(vu: HvxVector) -> HvxVector { vconv_sf_w(vu) } @@ -5707,7 +5720,7 @@ pub unsafe fn q6_vsf_equals_vw(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] #[cfg_attr(test, assert_instr(vconv_w_sf))] #[unstable(feature = "stdarch_hexagon", issue = 
"151523")] -pub unsafe fn q6_vw_equals_vsf(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_equals_Vsf(vu: HvxVector) -> HvxVector { vconv_w_sf(vu) } @@ -5719,7 +5732,7 @@ pub unsafe fn q6_vw_equals_vsf(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(get_qfext))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vgetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vgetqfext_VR(vu: HvxVector, rt: i32) -> HvxVector { get_qfext(vu, rt) } @@ -5731,7 +5744,7 @@ pub unsafe fn q6_v_vgetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(set_qfext))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vsetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vsetqfext_VR(vu: HvxVector, rt: i32) -> HvxVector { set_qfext(vu, rt) } @@ -5743,7 +5756,7 @@ pub unsafe fn q6_v_vsetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vabs_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vabs_v(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vabs_V(vu: HvxVector) -> HvxVector { vabs_f8(vu) } @@ -5755,7 +5768,7 @@ pub unsafe fn q6_v_vabs_v(vu: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vcvt2_hf_b))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_whf_vcvt2_vb(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Whf_vcvt2_Vb(vu: HvxVector) -> HvxVectorPair { vcvt2_hf_b(vu) } @@ -5767,7 +5780,7 @@ pub unsafe fn q6_whf_vcvt2_vb(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vcvt2_hf_ub))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_whf_vcvt2_vub(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Whf_vcvt2_Vub(vu: HvxVector) -> HvxVectorPair { vcvt2_hf_ub(vu) } @@ -5779,7 +5792,7 @@ pub unsafe fn q6_whf_vcvt2_vub(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vcvt_hf_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_whf_vcvt_v(vu: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_Whf_vcvt_V(vu: HvxVector) -> HvxVectorPair { vcvt_hf_f8(vu) } @@ -5791,7 +5804,7 @@ pub unsafe fn q6_whf_vcvt_v(vu: HvxVector) -> HvxVectorPair { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vfmax_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vfmax_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vfmax_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { vfmax_f8(vu, vv) } @@ -5803,7 +5816,7 @@ pub unsafe fn q6_v_vfmax_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vfmin_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vfmin_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vfmin_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { vfmin_f8(vu, vv) } @@ -5815,7 +5828,7 @@ pub unsafe fn q6_v_vfmin_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vfneg_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vfneg_v(vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vfneg_V(vu: HvxVector) -> HvxVector { vfneg_f8(vu) } @@ -5827,7 +5840,7 @@ pub unsafe fn q6_v_vfneg_v(vu: HvxVector) -> HvxVector { #[inline(always)] #[cfg_attr(target_arch = 
"hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_and_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { +pub unsafe fn Q6_Q_and_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { core::mem::transmute::(vandqrt( pred_and( vandvrt(core::mem::transmute::(qs), -1), @@ -5845,7 +5858,7 @@ pub unsafe fn q6_q_and_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_and_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { +pub unsafe fn Q6_Q_and_QQn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { core::mem::transmute::(vandqrt( pred_and_n( vandvrt(core::mem::transmute::(qs), -1), @@ -5863,7 +5876,7 @@ pub unsafe fn q6_q_and_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPre #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_not_q(qs: HvxVectorPred) -> HvxVectorPred { +pub unsafe fn Q6_Q_not_Q(qs: HvxVectorPred) -> HvxVectorPred { core::mem::transmute::(vandqrt( pred_not(vandvrt( core::mem::transmute::(qs), @@ -5881,7 +5894,7 @@ pub unsafe fn q6_q_not_q(qs: HvxVectorPred) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_or_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { +pub unsafe fn Q6_Q_or_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { core::mem::transmute::(vandqrt( pred_or( vandvrt(core::mem::transmute::(qs), -1), @@ -5899,7 +5912,7 @@ pub unsafe fn q6_q_or_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_or_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { +pub unsafe fn Q6_Q_or_QQn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { core::mem::transmute::(vandqrt( pred_or_n( vandvrt(core::mem::transmute::(qs), -1), @@ -5917,7 +5930,7 @@ pub unsafe fn q6_q_or_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vsetq_r(rt: i32) -> HvxVectorPred { +pub unsafe fn Q6_Q_vsetq_R(rt: i32) -> HvxVectorPred { core::mem::transmute::(vandqrt(pred_scalar2(rt), -1)) } @@ -5929,7 +5942,7 @@ pub unsafe fn q6_q_vsetq_r(rt: i32) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_xor_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { +pub unsafe fn Q6_Q_xor_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { core::mem::transmute::(vandqrt( pred_xor( vandvrt(core::mem::transmute::(qs), -1), @@ -5947,7 +5960,7 @@ pub unsafe fn q6_q_xor_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vmem_qnriv(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { +pub unsafe fn Q6_vmem_QnRIV(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { vS32b_nqpred_ai( vandvrt(core::mem::transmute::(qv), -1), rt, @@ -5963,7 +5976,7 @@ pub unsafe fn q6_vmem_qnriv(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vmem_qnriv_nt(qv: HvxVectorPred, rt: 
*mut HvxVector, vs: HvxVector) { +pub unsafe fn Q6_vmem_QnRIV_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { vS32b_nt_nqpred_ai( vandvrt(core::mem::transmute::(qv), -1), rt, @@ -5979,7 +5992,7 @@ pub unsafe fn q6_vmem_qnriv_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVec #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vmem_qriv_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { +pub unsafe fn Q6_vmem_QRIV_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { vS32b_nt_qpred_ai( vandvrt(core::mem::transmute::(qv), -1), rt, @@ -5995,7 +6008,7 @@ pub unsafe fn q6_vmem_qriv_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVect #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vmem_qriv(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { +pub unsafe fn Q6_vmem_QRIV(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { vS32b_qpred_ai( vandvrt(core::mem::transmute::(qv), -1), rt, @@ -6011,7 +6024,7 @@ pub unsafe fn q6_vmem_qriv(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_condacc_qnvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_condacc_QnVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vaddbnq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6027,7 +6040,7 @@ pub unsafe fn q6_vb_condacc_qnvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_condacc_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) 
-> HvxVector { +pub unsafe fn Q6_Vb_condacc_QVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vaddbq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6043,7 +6056,7 @@ pub unsafe fn q6_vb_condacc_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_condacc_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_condacc_QnVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vaddhnq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6059,7 +6072,7 @@ pub unsafe fn q6_vh_condacc_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_condacc_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_condacc_QVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vaddhq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6075,7 +6088,7 @@ pub unsafe fn q6_vh_condacc_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_condacc_qnvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_condacc_QnVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vaddwnq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6091,7 +6104,7 @@ pub unsafe fn q6_vw_condacc_qnvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_condacc_qvwvw(qv: HvxVectorPred, vx: 
HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_condacc_QVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vaddwq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6107,7 +6120,7 @@ pub unsafe fn q6_vw_condacc_qvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vand_qr(qu: HvxVectorPred, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vand_QR(qu: HvxVectorPred, rt: i32) -> HvxVector { vandvrt(core::mem::transmute::(qu), rt) } @@ -6119,7 +6132,7 @@ pub unsafe fn q6_v_vand_qr(qu: HvxVectorPred, rt: i32) -> HvxVector { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vandor_vqr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vandor_VQR(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { vandvrt_acc(vx, core::mem::transmute::(qu), rt) } @@ -6131,7 +6144,7 @@ pub unsafe fn q6_v_vandor_vqr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxV #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vand_vr(vu: HvxVector, rt: i32) -> HvxVectorPred { +pub unsafe fn Q6_Q_vand_VR(vu: HvxVector, rt: i32) -> HvxVectorPred { core::mem::transmute::(vandqrt(vu, rt)) } @@ -6143,7 +6156,7 @@ pub unsafe fn q6_q_vand_vr(vu: HvxVector, rt: i32) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vandor_qvr(qx: HvxVectorPred, vu: HvxVector, rt: i32) -> HvxVectorPred { +pub unsafe fn Q6_Q_vandor_QVR(qx: HvxVectorPred, vu: HvxVector, rt: i32) -> HvxVectorPred { core::mem::transmute::(vandqrt_acc( 
core::mem::transmute::(qx), vu, @@ -6159,7 +6172,7 @@ pub unsafe fn q6_q_vandor_qvr(qx: HvxVectorPred, vu: HvxVector, rt: i32) -> HvxV #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eq_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_eq_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(veqb(vu, vv), -1)) } @@ -6171,7 +6184,7 @@ pub unsafe fn q6_q_vcmp_eq_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eqand_qvbvb( +pub unsafe fn Q6_Q_vcmp_eqand_QVbVb( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6194,7 +6207,7 @@ pub unsafe fn q6_q_vcmp_eqand_qvbvb( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eqor_qvbvb( +pub unsafe fn Q6_Q_vcmp_eqor_QVbVb( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6217,7 +6230,7 @@ pub unsafe fn q6_q_vcmp_eqor_qvbvb( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eqxacc_qvbvb( +pub unsafe fn Q6_Q_vcmp_eqxacc_QVbVb( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6240,7 +6253,7 @@ pub unsafe fn q6_q_vcmp_eqxacc_qvbvb( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eq_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_eq_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(veqh(vu, vv), -1)) } @@ -6252,7 +6265,7 @@ pub unsafe 
fn q6_q_vcmp_eq_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eqand_qvhvh( +pub unsafe fn Q6_Q_vcmp_eqand_QVhVh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6275,7 +6288,7 @@ pub unsafe fn q6_q_vcmp_eqand_qvhvh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eqor_qvhvh( +pub unsafe fn Q6_Q_vcmp_eqor_QVhVh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6298,7 +6311,7 @@ pub unsafe fn q6_q_vcmp_eqor_qvhvh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eqxacc_qvhvh( +pub unsafe fn Q6_Q_vcmp_eqxacc_QVhVh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6321,7 +6334,7 @@ pub unsafe fn q6_q_vcmp_eqxacc_qvhvh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eq_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_eq_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(veqw(vu, vv), -1)) } @@ -6333,7 +6346,7 @@ pub unsafe fn q6_q_vcmp_eq_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eqand_qvwvw( +pub unsafe fn Q6_Q_vcmp_eqand_QVwVw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6356,7 +6369,7 @@ pub unsafe fn q6_q_vcmp_eqand_qvwvw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", 
issue = "151523")] -pub unsafe fn q6_q_vcmp_eqor_qvwvw( +pub unsafe fn Q6_Q_vcmp_eqor_QVwVw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6379,7 +6392,7 @@ pub unsafe fn q6_q_vcmp_eqor_qvwvw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_eqxacc_qvwvw( +pub unsafe fn Q6_Q_vcmp_eqxacc_QVwVw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6402,7 +6415,7 @@ pub unsafe fn q6_q_vcmp_eqxacc_qvwvw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gt_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_gt_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(vgtb(vu, vv), -1)) } @@ -6414,7 +6427,7 @@ pub unsafe fn q6_q_vcmp_gt_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtand_qvbvb( +pub unsafe fn Q6_Q_vcmp_gtand_QVbVb( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6437,7 +6450,7 @@ pub unsafe fn q6_q_vcmp_gtand_qvbvb( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtor_qvbvb( +pub unsafe fn Q6_Q_vcmp_gtor_QVbVb( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6460,7 +6473,7 @@ pub unsafe fn q6_q_vcmp_gtor_qvbvb( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtxacc_qvbvb( +pub unsafe fn Q6_Q_vcmp_gtxacc_QVbVb( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6483,7 +6496,7 @@ pub unsafe fn 
q6_q_vcmp_gtxacc_qvbvb( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gt_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_gt_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(vgth(vu, vv), -1)) } @@ -6495,7 +6508,7 @@ pub unsafe fn q6_q_vcmp_gt_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtand_qvhvh( +pub unsafe fn Q6_Q_vcmp_gtand_QVhVh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6518,7 +6531,7 @@ pub unsafe fn q6_q_vcmp_gtand_qvhvh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtor_qvhvh( +pub unsafe fn Q6_Q_vcmp_gtor_QVhVh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6541,7 +6554,7 @@ pub unsafe fn q6_q_vcmp_gtor_qvhvh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtxacc_qvhvh( +pub unsafe fn Q6_Q_vcmp_gtxacc_QVhVh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6564,7 +6577,7 @@ pub unsafe fn q6_q_vcmp_gtxacc_qvhvh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gt_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_gt_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(vgtub(vu, vv), -1)) } @@ -6576,7 +6589,7 @@ pub unsafe fn q6_q_vcmp_gt_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPred #[inline(always)] 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtand_qvubvub( +pub unsafe fn Q6_Q_vcmp_gtand_QVubVub( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6599,7 +6612,7 @@ pub unsafe fn q6_q_vcmp_gtand_qvubvub( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtor_qvubvub( +pub unsafe fn Q6_Q_vcmp_gtor_QVubVub( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6622,7 +6635,7 @@ pub unsafe fn q6_q_vcmp_gtor_qvubvub( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtxacc_qvubvub( +pub unsafe fn Q6_Q_vcmp_gtxacc_QVubVub( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6645,7 +6658,7 @@ pub unsafe fn q6_q_vcmp_gtxacc_qvubvub( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gt_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_gt_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(vgtuh(vu, vv), -1)) } @@ -6657,7 +6670,7 @@ pub unsafe fn q6_q_vcmp_gt_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtand_qvuhvuh( +pub unsafe fn Q6_Q_vcmp_gtand_QVuhVuh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6680,7 +6693,7 @@ pub unsafe fn q6_q_vcmp_gtand_qvuhvuh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtor_qvuhvuh( 
+pub unsafe fn Q6_Q_vcmp_gtor_QVuhVuh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6703,7 +6716,7 @@ pub unsafe fn q6_q_vcmp_gtor_qvuhvuh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtxacc_qvuhvuh( +pub unsafe fn Q6_Q_vcmp_gtxacc_QVuhVuh( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6726,7 +6739,7 @@ pub unsafe fn q6_q_vcmp_gtxacc_qvuhvuh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gt_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_gt_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(vgtuw(vu, vv), -1)) } @@ -6738,7 +6751,7 @@ pub unsafe fn q6_q_vcmp_gt_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtand_qvuwvuw( +pub unsafe fn Q6_Q_vcmp_gtand_QVuwVuw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6761,7 +6774,7 @@ pub unsafe fn q6_q_vcmp_gtand_qvuwvuw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtor_qvuwvuw( +pub unsafe fn Q6_Q_vcmp_gtor_QVuwVuw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6784,7 +6797,7 @@ pub unsafe fn q6_q_vcmp_gtor_qvuwvuw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtxacc_qvuwvuw( +pub unsafe fn Q6_Q_vcmp_gtxacc_QVuwVuw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6807,7 +6820,7 @@ pub unsafe fn q6_q_vcmp_gtxacc_qvuwvuw( #[inline(always)] 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gt_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_gt_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(vgtw(vu, vv), -1)) } @@ -6819,7 +6832,7 @@ pub unsafe fn q6_q_vcmp_gt_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtand_qvwvw( +pub unsafe fn Q6_Q_vcmp_gtand_QVwVw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6842,7 +6855,7 @@ pub unsafe fn q6_q_vcmp_gtand_qvwvw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtor_qvwvw( +pub unsafe fn Q6_Q_vcmp_gtor_QVwVw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6865,7 +6878,7 @@ pub unsafe fn q6_q_vcmp_gtor_qvwvw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtxacc_qvwvw( +pub unsafe fn Q6_Q_vcmp_gtxacc_QVwVw( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -6888,7 +6901,7 @@ pub unsafe fn q6_q_vcmp_gtxacc_qvwvw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vmux_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vmux_QVV(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVector { vmux( vandvrt(core::mem::transmute::(qt), -1), vu, @@ -6904,7 +6917,7 @@ pub unsafe fn q6_v_vmux_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> #[inline(always)] #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_condnac_qnvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_condnac_QnVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vsubbnq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6920,7 +6933,7 @@ pub unsafe fn q6_vb_condnac_qnvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_condnac_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vb_condnac_QVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vsubbq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6936,7 +6949,7 @@ pub unsafe fn q6_vb_condnac_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_condnac_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_condnac_QnVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vsubhnq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6952,7 +6965,7 @@ pub unsafe fn q6_vh_condnac_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_condnac_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vh_condnac_QVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vsubhq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6968,7 +6981,7 @@ pub unsafe fn q6_vh_condnac_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto #[inline(always)] 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_condnac_qnvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_condnac_QnVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vsubwnq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -6984,7 +6997,7 @@ pub unsafe fn q6_vw_condnac_qnvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_condnac_qvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_Vw_condnac_QVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { vsubwq( vandvrt(core::mem::transmute::(qv), -1), vx, @@ -7000,7 +7013,7 @@ pub unsafe fn q6_vw_condnac_qvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_w_vswap_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVectorPair { +pub unsafe fn Q6_W_vswap_QVV(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVectorPair { vswap( vandvrt(core::mem::transmute::(qt), -1), vu, @@ -7016,7 +7029,7 @@ pub unsafe fn q6_w_vswap_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vsetq2_r(rt: i32) -> HvxVectorPred { +pub unsafe fn Q6_Q_vsetq2_R(rt: i32) -> HvxVectorPred { core::mem::transmute::(vandqrt(pred_scalar2v2(rt), -1)) } @@ -7028,7 +7041,7 @@ pub unsafe fn q6_q_vsetq2_r(rt: i32) -> HvxVectorPred { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature 
= "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_qb_vshuffe_qhqh(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { +pub unsafe fn Q6_Qb_vshuffe_QhQh(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { core::mem::transmute::(vandqrt( shuffeqh( vandvrt(core::mem::transmute::(qs), -1), @@ -7046,7 +7059,7 @@ pub unsafe fn q6_qb_vshuffe_qhqh(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVec #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_qh_vshuffe_qwqw(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { +pub unsafe fn Q6_Qh_vshuffe_QwQw(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { core::mem::transmute::(vandqrt( shuffeqw( vandvrt(core::mem::transmute::(qs), -1), @@ -7064,7 +7077,7 @@ pub unsafe fn q6_qh_vshuffe_qwqw(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVec #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vand_qnr(qu: HvxVectorPred, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vand_QnR(qu: HvxVectorPred, rt: i32) -> HvxVector { vandnqrt( vandvrt(core::mem::transmute::(qu), -1), rt, @@ -7079,7 +7092,7 @@ pub unsafe fn q6_v_vand_qnr(qu: HvxVectorPred, rt: i32) -> HvxVector { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vandor_vqnr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { +pub unsafe fn Q6_V_vandor_VQnR(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { vandnqrt_acc( vx, vandvrt(core::mem::transmute::(qu), -1), @@ -7095,7 +7108,7 @@ pub unsafe fn q6_v_vandor_vqnr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> Hvx #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = 
"151523")] -pub unsafe fn q6_v_vand_qnv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vand_QnV(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { vandvnqv( vandvrt(core::mem::transmute::(qv), -1), vu, @@ -7110,7 +7123,7 @@ pub unsafe fn q6_v_vand_qnv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_v_vand_qv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { +pub unsafe fn Q6_V_vand_QV(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { vandvqv( vandvrt(core::mem::transmute::(qv), -1), vu, @@ -7125,7 +7138,7 @@ pub unsafe fn q6_v_vand_qv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vgather_aqrmvh( +pub unsafe fn Q6_vgather_AQRMVh( rs: *mut HvxVector, qs: HvxVectorPred, rt: i32, @@ -7149,7 +7162,7 @@ pub unsafe fn q6_vgather_aqrmvh( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vgather_aqrmww( +pub unsafe fn Q6_vgather_AQRMWw( rs: *mut HvxVector, qs: HvxVectorPred, rt: i32, @@ -7173,7 +7186,7 @@ pub unsafe fn q6_vgather_aqrmww( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vgather_aqrmvw( +pub unsafe fn Q6_vgather_AQRMVw( rs: *mut HvxVector, qs: HvxVectorPred, rt: i32, @@ -7197,7 +7210,7 @@ pub unsafe fn q6_vgather_aqrmvw( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vb_prefixsum_q(qv: HvxVectorPred) -> HvxVector { +pub unsafe fn Q6_Vb_prefixsum_Q(qv: 
HvxVectorPred) -> HvxVector { vprefixqb(vandvrt( core::mem::transmute::(qv), -1, @@ -7212,7 +7225,7 @@ pub unsafe fn q6_vb_prefixsum_q(qv: HvxVectorPred) -> HvxVector { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vh_prefixsum_q(qv: HvxVectorPred) -> HvxVector { +pub unsafe fn Q6_Vh_prefixsum_Q(qv: HvxVectorPred) -> HvxVector { vprefixqh(vandvrt( core::mem::transmute::(qv), -1, @@ -7227,7 +7240,7 @@ pub unsafe fn q6_vh_prefixsum_q(qv: HvxVectorPred) -> HvxVector { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_prefixsum_q(qv: HvxVectorPred) -> HvxVector { +pub unsafe fn Q6_Vw_prefixsum_Q(qv: HvxVectorPred) -> HvxVector { vprefixqw(vandvrt( core::mem::transmute::(qv), -1, @@ -7242,7 +7255,7 @@ pub unsafe fn q6_vw_prefixsum_q(qv: HvxVectorPred) -> HvxVector { #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vscatter_qrmvhv( +pub unsafe fn Q6_vscatter_QRMVhV( qs: HvxVectorPred, rt: i32, mu: i32, @@ -7266,7 +7279,7 @@ pub unsafe fn q6_vscatter_qrmvhv( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vscatter_qrmwwv( +pub unsafe fn Q6_vscatter_QRMWwV( qs: HvxVectorPred, rt: i32, mu: i32, @@ -7290,7 +7303,7 @@ pub unsafe fn q6_vscatter_qrmwwv( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vscatter_qrmvwv( +pub unsafe fn Q6_vscatter_QRMVwV( qs: HvxVectorPred, rt: i32, mu: i32, @@ -7314,7 +7327,7 @@ pub unsafe fn q6_vscatter_qrmvwv( #[inline(always)] #[cfg_attr(target_arch = 
"hexagon", target_feature(enable = "hvxv66"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_vw_vadd_vwvwq_carry_sat( +pub unsafe fn Q6_Vw_vadd_VwVwQ_carry_sat( vu: HvxVector, vv: HvxVector, qs: HvxVectorPred, @@ -7334,7 +7347,7 @@ pub unsafe fn q6_vw_vadd_vwvwq_carry_sat( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_gt_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(vgthf(vu, vv), -1)) } @@ -7346,7 +7359,7 @@ pub unsafe fn q6_q_vcmp_gt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtand_qvhfvhf( +pub unsafe fn Q6_Q_vcmp_gtand_QVhfVhf( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -7369,7 +7382,7 @@ pub unsafe fn q6_q_vcmp_gtand_qvhfvhf( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtor_qvhfvhf( +pub unsafe fn Q6_Q_vcmp_gtor_QVhfVhf( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -7392,7 +7405,7 @@ pub unsafe fn q6_q_vcmp_gtor_qvhfvhf( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtxacc_qvhfvhf( +pub unsafe fn Q6_Q_vcmp_gtxacc_QVhfVhf( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -7415,7 +7428,7 @@ pub unsafe fn q6_q_vcmp_gtxacc_qvhfvhf( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gt_vsfvsf(vu: HvxVector, vv: 
HvxVector) -> HvxVectorPred { +pub unsafe fn Q6_Q_vcmp_gt_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { core::mem::transmute::(vandqrt(vgtsf(vu, vv), -1)) } @@ -7427,7 +7440,7 @@ pub unsafe fn q6_q_vcmp_gt_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtand_qvsfvsf( +pub unsafe fn Q6_Q_vcmp_gtand_QVsfVsf( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -7450,7 +7463,7 @@ pub unsafe fn q6_q_vcmp_gtand_qvsfvsf( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtor_qvsfvsf( +pub unsafe fn Q6_Q_vcmp_gtor_QVsfVsf( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, @@ -7473,7 +7486,7 @@ pub unsafe fn q6_q_vcmp_gtor_qvsfvsf( #[inline(always)] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub unsafe fn q6_q_vcmp_gtxacc_qvsfvsf( +pub unsafe fn Q6_Q_vcmp_gtxacc_QVsfVsf( qx: HvxVectorPred, vu: HvxVector, vv: HvxVector, diff --git a/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs b/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs index 3cfbabfe0ab28..79837e2224ee0 100644 --- a/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs +++ b/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs @@ -515,12 +515,6 @@ fn parse_header(content: &str) -> Vec { intrinsics } -/// Convert Q6 name to Rust function name (lowercase with underscores) -fn q6_to_rust_name(q6_name: &str) -> String { - // Q6_V_hi_W -> q6_v_hi_w - q6_name.to_lowercase() -} - /// Generate the module documentation fn generate_module_doc(mode: VectorMode) -> String { format!( @@ -541,6 +535,18 @@ fn generate_module_doc(mode: VectorMode) -> String { //! //! 
To use this module, compile with `-C target-feature=+{target_feature}`. //! +//! ## Naming Convention +//! +//! Function names preserve the original Q6 naming case because the convention +//! uses case to distinguish register types: +//! - `W` (uppercase) = vector pair (`HvxVectorPair`) +//! - `V` (uppercase) = vector (`HvxVector`) +//! - `Q` (uppercase) = predicate (`HvxVectorPred`) +//! - `R` = scalar register (`i32`) +//! +//! For example, `Q6_W_vcombine_VV` operates on a vector pair while +//! `Q6_V_hi_W` extracts a vector from a pair. +//! //! ## Architecture Versions //! //! Different intrinsics require different HVX architecture versions. Use the @@ -577,6 +583,7 @@ fn generate_types(mode: VectorMode) -> String { format!( r#" #![allow(non_camel_case_types)] +#![allow(non_snake_case)] #[cfg(test)] use stdarch_test::assert_instr; @@ -1433,7 +1440,7 @@ fn generate_functions(intrinsics: &[IntrinsicInfo]) -> String { // Generate simple intrinsics for info in intrinsics.iter().filter(|i| !i.is_compound) { - let rust_name = q6_to_rust_name(&info.q6_name); + let rust_name = &info.q6_name; // Generate doc comment output.push_str(&format!("/// `{}`\n", info.asm_syntax)); @@ -1505,7 +1512,7 @@ fn generate_functions(intrinsics: &[IntrinsicInfo]) -> String { let overrides = get_compound_overrides(); for info in intrinsics.iter().filter(|i| i.is_compound) { if let Some(ref compound_expr) = info.compound_expr { - let rust_name = q6_to_rust_name(&info.q6_name); + let rust_name = &info.q6_name; // Get the primary instruction for assert_instr let _primary_instr = get_compound_primary_instr(compound_expr) diff --git a/library/stdarch/examples/gaussian.rs b/library/stdarch/examples/gaussian.rs index dea16f797aca6..a310c24def9a7 100644 --- a/library/stdarch/examples/gaussian.rs +++ b/library/stdarch/examples/gaussian.rs @@ -95,30 +95,30 @@ mod hvx { let below = *inp2.add(i); // Widen above + below to 16-bit using HvxVectorPair - // q6_wh_vadd_vubvub: adds two u8 vectors, 
producing u16 results in a pair - let above_plus_below: HvxVectorPair = q6_wh_vadd_vubvub(above, below); + // Q6_Wh_vadd_VubVub: adds two u8 vectors, producing u16 results in a pair + let above_plus_below: HvxVectorPair = Q6_Wh_vadd_VubVub(above, below); // Widen center * 2 (add center to itself) - let center_x2: HvxVectorPair = q6_wh_vadd_vubvub(center, center); + let center_x2: HvxVectorPair = Q6_Wh_vadd_VubVub(center, center); // Add them: (above + below) + (center * 2) = above + 2*center + below - let sum: HvxVectorPair = q6_wh_vadd_whwh(above_plus_below, center_x2); + let sum: HvxVectorPair = Q6_Wh_vadd_WhWh(above_plus_below, center_x2); // Extract high and low vectors from the pair (each contains u16 values) - let sum_lo = q6_v_lo_w(sum); // Lower 64 elements as i16 - let sum_hi = q6_v_hi_w(sum); // Upper 64 elements as i16 + let sum_lo = Q6_V_lo_W(sum); // Lower 64 elements as i16 + let sum_hi = Q6_V_hi_W(sum); // Upper 64 elements as i16 // Arithmetic right shift by 2 (divide by 4) with rounding // Add 2 for rounding before shift: (sum + 2) >> 2 - let two = q6_vh_vsplat_r(2); - let sum_lo_rounded = q6_vh_vadd_vhvh(sum_lo, two); - let sum_hi_rounded = q6_vh_vadd_vhvh(sum_hi, two); - let shifted_lo = q6_vh_vasr_vhvh(sum_lo_rounded, two); - let shifted_hi = q6_vh_vasr_vhvh(sum_hi_rounded, two); + let two = Q6_Vh_vsplat_R(2); + let sum_lo_rounded = Q6_Vh_vadd_VhVh(sum_lo, two); + let sum_hi_rounded = Q6_Vh_vadd_VhVh(sum_hi, two); + let shifted_lo = Q6_Vh_vasr_VhVh(sum_lo_rounded, two); + let shifted_hi = Q6_Vh_vasr_VhVh(sum_hi_rounded, two); // Pack back to u8 with saturation: takes hi and lo halfword vectors, // saturates to u8, and interleaves them back to original order - let result = q6_vub_vsat_vhvh(shifted_hi, shifted_lo); + let result = Q6_Vub_vsat_VhVh(shifted_hi, shifted_lo); *outp.add(i) = result; } @@ -142,44 +142,44 @@ mod hvx { let outp = dst as *mut HvxVector; let n_chunks = width / VLEN; - let mut prev = q6_v_vzero(); + let mut prev = 
Q6_V_vzero(); for i in 0..n_chunks { let curr = *inp.add(i); let next = if i + 1 < n_chunks { *inp.add(i + 1) } else { - q6_v_vzero() + Q6_V_vzero() }; // Left neighbor (x-1): shift curr right by 1 byte, filling from prev - let left = q6_v_vlalign_vvr(curr, prev, 1); + let left = Q6_V_vlalign_VVR(curr, prev, 1); // Right neighbor (x+1): shift curr left by 1 byte, filling from next - let right = q6_v_valign_vvr(next, curr, 1); + let right = Q6_V_valign_VVR(next, curr, 1); // Widen left + right to 16-bit - let left_plus_right: HvxVectorPair = q6_wh_vadd_vubvub(left, right); + let left_plus_right: HvxVectorPair = Q6_Wh_vadd_VubVub(left, right); // Widen center * 2 - let center_x2: HvxVectorPair = q6_wh_vadd_vubvub(curr, curr); + let center_x2: HvxVectorPair = Q6_Wh_vadd_VubVub(curr, curr); // Add: left + 2*center + right - let sum: HvxVectorPair = q6_wh_vadd_whwh(left_plus_right, center_x2); + let sum: HvxVectorPair = Q6_Wh_vadd_WhWh(left_plus_right, center_x2); // Extract high and low vectors - let sum_lo = q6_v_lo_w(sum); - let sum_hi = q6_v_hi_w(sum); + let sum_lo = Q6_V_lo_W(sum); + let sum_hi = Q6_V_hi_W(sum); // Arithmetic right shift by 2 with rounding - let two = q6_vh_vsplat_r(2); - let sum_lo_rounded = q6_vh_vadd_vhvh(sum_lo, two); - let sum_hi_rounded = q6_vh_vadd_vhvh(sum_hi, two); - let shifted_lo = q6_vh_vasr_vhvh(sum_lo_rounded, two); - let shifted_hi = q6_vh_vasr_vhvh(sum_hi_rounded, two); + let two = Q6_Vh_vsplat_R(2); + let sum_lo_rounded = Q6_Vh_vadd_VhVh(sum_lo, two); + let sum_hi_rounded = Q6_Vh_vadd_VhVh(sum_hi, two); + let shifted_lo = Q6_Vh_vasr_VhVh(sum_lo_rounded, two); + let shifted_hi = Q6_Vh_vasr_VhVh(sum_hi_rounded, two); // Pack back to u8 with saturation - let result = q6_vub_vsat_vhvh(shifted_hi, shifted_lo); + let result = Q6_Vub_vsat_VhVh(shifted_hi, shifted_lo); *outp.add(i) = result; From 8d85d1def70da65be7ec624c643c251d7f55b14b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eduardo=20S=C3=A1nchez=20Mu=C3=B1oz?= Date: Fri, 10 Apr 2026 
20:12:46 +0200 Subject: [PATCH 24/64] Do not use `SimdM::new` and remove `simd_m_ty!` --- .../crates/core_arch/src/powerpc/altivec.rs | 24 +-- .../crates/core_arch/src/powerpc/vsx.rs | 12 +- .../crates/core_arch/src/s390x/vector.rs | 8 +- library/stdarch/crates/core_arch/src/simd.rs | 138 +++--------------- 4 files changed, 45 insertions(+), 137 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/powerpc/altivec.rs b/library/stdarch/crates/core_arch/src/powerpc/altivec.rs index f68121ad31716..78ec39f91ff33 100644 --- a/library/stdarch/crates/core_arch/src/powerpc/altivec.rs +++ b/library/stdarch/crates/core_arch/src/powerpc/altivec.rs @@ -4700,10 +4700,10 @@ mod tests { { $name: ident, $fn:ident, $ty: ident -> $ty_out: ident, [$($a:expr),+], [$($b:expr),+], [$($d:expr),+] } => { #[simd_test(enable = "altivec")] fn $name() { - let a: s_t_l!($ty) = $ty::new($($a),+).into(); - let b: s_t_l!($ty) = $ty::new($($b),+).into(); + let a: s_t_l!($ty) = $ty::from_array([$($a),+]).into(); + let b: s_t_l!($ty) = $ty::from_array([$($b),+]).into(); - let d = $ty_out::new($($d),+); + let d = $ty_out::from_array([$($d),+]); let r = $ty_out::from(unsafe { $fn(a, b) }); assert_eq!(d, r); } @@ -4711,8 +4711,8 @@ mod tests { { $name: ident, $fn:ident, $ty: ident -> $ty_out: ident, [$($a:expr),+], [$($b:expr),+], $d:expr } => { #[simd_test(enable = "altivec")] fn $name() { - let a: s_t_l!($ty) = $ty::new($($a),+).into(); - let b: s_t_l!($ty) = $ty::new($($b),+).into(); + let a: s_t_l!($ty) = $ty::from_array([$($a),+]).into(); + let b: s_t_l!($ty) = $ty::from_array([$($b),+]).into(); let r = $ty_out::from(unsafe { $fn(a, b) }); assert_eq!($d, r); @@ -4728,7 +4728,7 @@ mod tests { let d = vector_float::from(f32x4::new($($d),+)); let r = m32x4::from(unsafe { vec_cmple(vec_abs(vec_sub($fn(a), d)), vec_splats(f32::EPSILON)) }); - let e = m32x4::new(true, true, true, true); + let e = m32x4::splat(true); assert_eq!(e, r); } }; @@ -6212,10 +6212,10 @@ mod tests { [$($a:expr),+], 
[$($b:expr),+], [$($c:expr),+], [$($d:expr),+]} => { #[simd_test(enable = "altivec")] fn $name() { - let a = $longtype::from($shorttype::new($($a),+)); - let b = $longtype::from($shorttype::new($($b),+)); - let c = vector_unsigned_char::from(u8x16::new($($c),+)); - let d = $shorttype::new($($d),+); + let a = $longtype::from($shorttype::from_array([$($a),+])); + let b = $longtype::from($shorttype::from_array([$($b),+])); + let c = vector_unsigned_char::from(u8x16::from_array([$($c),+])); + let d = $shorttype::from_array([$($d),+]); let r = $shorttype::from(unsafe { vec_perm(a, b, c) }); assert_eq!(d, r); @@ -6664,7 +6664,7 @@ mod tests { let check = |a, b| { let r = m32x4::from(unsafe { vec_cmple(vec_abs(vec_sub(a, b)), vec_splats(f32::EPSILON)) }); - let e = m32x4::new(true, true, true, true); + let e = m32x4::splat(true); assert_eq!(e, r); }; @@ -6720,7 +6720,7 @@ mod tests { let r = m32x4::from(unsafe { vec_cmple(vec_abs(vec_sub(a, b)), vec_splats(f32::EPSILON)) }); println!("{:?} {:?}", a, b); - let e = m32x4::new(true, true, true, true); + let e = m32x4::splat(true); assert_eq!(e, r); }; diff --git a/library/stdarch/crates/core_arch/src/powerpc/vsx.rs b/library/stdarch/crates/core_arch/src/powerpc/vsx.rs index 0aac236173401..4a7b561a20c55 100644 --- a/library/stdarch/crates/core_arch/src/powerpc/vsx.rs +++ b/library/stdarch/crates/core_arch/src/powerpc/vsx.rs @@ -238,14 +238,14 @@ mod tests { {$name:ident, $shorttype:ident, $longtype:ident, [$($a:expr),+], [$($b:expr),+], [$($c:expr),+], [$($d:expr),+]} => { #[simd_test(enable = "vsx")] fn $name() { - let a = $longtype::from($shorttype::new($($a),+, $($b),+)); - let b = $longtype::from($shorttype::new($($c),+, $($d),+)); + let a = $longtype::from($shorttype::from_array([$($a),+, $($b),+])); + let b = $longtype::from($shorttype::from_array([$($c),+, $($d),+])); unsafe { - assert_eq!($shorttype::new($($a),+, $($c),+), $shorttype::from(vec_xxpermdi::<_, 0>(a, b))); - assert_eq!($shorttype::new($($b),+, $($c),+), 
$shorttype::from(vec_xxpermdi::<_, 1>(a, b))); - assert_eq!($shorttype::new($($a),+, $($d),+), $shorttype::from(vec_xxpermdi::<_, 2>(a, b))); - assert_eq!($shorttype::new($($b),+, $($d),+), $shorttype::from(vec_xxpermdi::<_, 3>(a, b))); + assert_eq!($shorttype::from_array([$($a),+, $($c),+]), $shorttype::from(vec_xxpermdi::<_, 0>(a, b))); + assert_eq!($shorttype::from_array([$($b),+, $($c),+]), $shorttype::from(vec_xxpermdi::<_, 1>(a, b))); + assert_eq!($shorttype::from_array([$($a),+, $($d),+]), $shorttype::from(vec_xxpermdi::<_, 2>(a, b))); + assert_eq!($shorttype::from_array([$($b),+, $($d),+]), $shorttype::from(vec_xxpermdi::<_, 3>(a, b))); } } } diff --git a/library/stdarch/crates/core_arch/src/s390x/vector.rs b/library/stdarch/crates/core_arch/src/s390x/vector.rs index 376c912c04090..fc5af1b14d0cd 100644 --- a/library/stdarch/crates/core_arch/src/s390x/vector.rs +++ b/library/stdarch/crates/core_arch/src/s390x/vector.rs @@ -6463,10 +6463,10 @@ mod tests { [$($a:expr),+], [$($b:expr),+], [$($c:expr),+], [$($d:expr),+]} => { #[simd_test(enable = "vector")] fn $name() { - let a = $longtype::from($shorttype::new($($a),+)); - let b = $longtype::from($shorttype::new($($b),+)); - let c = vector_unsigned_char::from(u8x16::new($($c),+)); - let d = $shorttype::new($($d),+); + let a = $longtype::from($shorttype::from_array([$($a),+])); + let b = $longtype::from($shorttype::from_array([$($b),+])); + let c = vector_unsigned_char::from(u8x16::from_array([$($c),+])); + let d = $shorttype::from_array([$($d),+]); let r = $shorttype::from(unsafe { vec_perm(a, b, c) }); assert_eq!(d, r); diff --git a/library/stdarch/crates/core_arch/src/simd.rs b/library/stdarch/crates/core_arch/src/simd.rs index 313c474792036..28716072f0a98 100644 --- a/library/stdarch/crates/core_arch/src/simd.rs +++ b/library/stdarch/crates/core_arch/src/simd.rs @@ -20,6 +20,8 @@ pub(crate) const unsafe fn simd_imin(a: T, b: T) -> T { pub(crate) unsafe trait SimdElement: Copy + const PartialEq + 
crate::fmt::Debug { + // SAFETY: all bits patterns of types implementing this trait must be valid + const ZERO: Self = unsafe { crate::mem::zeroed() }; } unsafe impl SimdElement for u8 {} @@ -42,8 +44,7 @@ pub(crate) struct Simd([T; N]); impl Simd { /// A value of this type where all elements are zeroed out. - // SAFETY: `T` implements `SimdElement`, so it is zeroable. - pub(crate) const ZERO: Self = unsafe { crate::mem::zeroed() }; + pub(crate) const ZERO: Self = Self::splat(T::ZERO); #[inline(always)] pub(crate) const fn from_array(elements: [T; N]) -> Self { @@ -163,7 +164,6 @@ impl SimdM { #[inline(always)] const fn bool_to_internal(x: bool) -> T { // SAFETY: `T` implements `SimdElement`, so all bit patterns are valid. - let zeros = const { unsafe { crate::mem::zeroed::() } }; let ones = const { // Ideally, this would be `transmute([0xFFu8; size_of::()])`, but // `size_of::()` is not allowed to use a generic parameter there. @@ -175,13 +175,24 @@ impl SimdM { } unsafe { r.assume_init() } }; - [zeros, ones][x as usize] + [T::ZERO, ones][x as usize] + } + + #[inline] + pub(crate) const fn from_array(elements: [bool; N]) -> Self { + let mut internal = [T::ZERO; N]; + let mut i = 0; + while i < N { + internal[i] = Self::bool_to_internal(elements[i]); + i += 1; + } + Self(internal) } #[inline] #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] pub(crate) const fn splat(value: bool) -> Self { - unsafe { crate::intrinsics::simd::simd_splat(value) } + unsafe { crate::intrinsics::simd::simd_splat(Self::bool_to_internal(value)) } } #[inline] @@ -218,19 +229,6 @@ impl crate::fmt::Debug for SimdM { } } -macro_rules! 
simd_m_ty { - ($id:ident [$elem_type:ident ; $len:literal]: $($param_name:ident),*) => { - pub(crate) type $id = SimdM<$elem_type, $len>; - - impl $id { - #[inline(always)] - pub(crate) const fn new($($param_name: bool),*) -> Self { - Self([$(Self::bool_to_internal($param_name)),*]) - } - } - } -} - // 16-bit wide types: simd_ty!(u8x2[u8;2]: x0, x1); @@ -363,38 +361,10 @@ simd_ty!( simd_ty!(f32x4[f32;4]: x0, x1, x2, x3); simd_ty!(f64x2[f64;2]: x0, x1); -simd_m_ty!( - m8x16[i8;16]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); -simd_m_ty!( - m16x8[i16;8]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); -simd_m_ty!(m32x4[i32;4]: x0, x1, x2, x3); -simd_m_ty!(m64x2[i64;2]: x0, x1); +pub(crate) type m8x16 = SimdM; +pub(crate) type m16x8 = SimdM; +pub(crate) type m32x4 = SimdM; +pub(crate) type m64x2 = SimdM; // 256-bit wide types: @@ -564,71 +534,9 @@ simd_ty!( ); simd_ty!(f64x4[f64;4]: x0, x1, x2, x3); -simd_m_ty!( - m8x32[i8;32]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15, - x16, - x17, - x18, - x19, - x20, - x21, - x22, - x23, - x24, - x25, - x26, - x27, - x28, - x29, - x30, - x31 -); -simd_m_ty!( - m16x16[i16;16]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); -simd_m_ty!( - m32x8[i32;8]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); +pub(crate) type m8x32 = SimdM; +pub(crate) type m16x16 = SimdM; +pub(crate) type m32x8 = SimdM; // 512-bit wide types: From 4103f7ddcec270b4233a3ffa69f8d443bd9b5d8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eduardo=20S=C3=A1nchez=20Mu=C3=B1oz?= Date: Fri, 10 Apr 2026 20:42:25 +0200 Subject: [PATCH 25/64] Do not use a macro to define `Simd::new` --- library/stdarch/crates/core_arch/src/simd.rs | 914 +++--------------- .../crates/core_arch/src/x86/avx512f.rs | 4 +- 2 files changed, 153 insertions(+), 765 deletions(-) diff --git 
a/library/stdarch/crates/core_arch/src/simd.rs b/library/stdarch/crates/core_arch/src/simd.rs index 28716072f0a98..2c6829b465c42 100644 --- a/library/stdarch/crates/core_arch/src/simd.rs +++ b/library/stdarch/crates/core_arch/src/simd.rs @@ -101,6 +101,103 @@ impl crate::fmt::Debug for Simd { } } +impl Simd { + #[inline] + pub(crate) const fn new(x0: T) -> Self { + Self([x0]) + } +} + +impl Simd { + #[inline] + pub(crate) const fn new(x0: T, x1: T) -> Self { + Self([x0, x1]) + } +} + +impl Simd { + #[inline] + pub(crate) const fn new(x0: T, x1: T, x2: T, x3: T) -> Self { + Self([x0, x1, x2, x3]) + } +} + +impl Simd { + #[inline] + pub(crate) const fn new(x0: T, x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T) -> Self { + Self([x0, x1, x2, x3, x4, x5, x6, x7]) + } +} + +impl Simd { + #[inline] + pub(crate) const fn new( + x0: T, + x1: T, + x2: T, + x3: T, + x4: T, + x5: T, + x6: T, + x7: T, + x8: T, + x9: T, + x10: T, + x11: T, + x12: T, + x13: T, + x14: T, + x15: T, + ) -> Self { + Self([ + x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, + ]) + } +} + +impl Simd { + #[inline] + pub(crate) const fn new( + x0: T, + x1: T, + x2: T, + x3: T, + x4: T, + x5: T, + x6: T, + x7: T, + x8: T, + x9: T, + x10: T, + x11: T, + x12: T, + x13: T, + x14: T, + x15: T, + x16: T, + x17: T, + x18: T, + x19: T, + x20: T, + x21: T, + x22: T, + x23: T, + x24: T, + x25: T, + x26: T, + x27: T, + x28: T, + x29: T, + x30: T, + x31: T, + ) -> Self { + Self([ + x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, + x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31, + ]) + } +} + impl Simd { #[inline] pub(crate) const fn to_bits(self) -> Simd { @@ -143,19 +240,6 @@ impl Simd { } } -macro_rules! 
simd_ty { - ($id:ident [$elem_type:ty ; $len:literal]: $($param_name:ident),*) => { - pub(crate) type $id = Simd<$elem_type, $len>; - - impl $id { - #[inline(always)] - pub(crate) const fn new($($param_name: $elem_type),*) -> Self { - Self([$($param_name),*]) - } - } - } -} - #[repr(simd)] #[derive(Copy)] pub(crate) struct SimdM([T; N]); @@ -231,135 +315,48 @@ impl crate::fmt::Debug for SimdM { // 16-bit wide types: -simd_ty!(u8x2[u8;2]: x0, x1); -simd_ty!(i8x2[i8;2]: x0, x1); +pub(crate) type u8x2 = Simd; +pub(crate) type i8x2 = Simd; // 32-bit wide types: -simd_ty!(u8x4[u8;4]: x0, x1, x2, x3); -simd_ty!(u16x2[u16;2]: x0, x1); +pub(crate) type u8x4 = Simd; +pub(crate) type u16x2 = Simd; -simd_ty!(i8x4[i8;4]: x0, x1, x2, x3); -simd_ty!(i16x2[i16;2]: x0, x1); +pub(crate) type i8x4 = Simd; +pub(crate) type i16x2 = Simd; // 64-bit wide types: -simd_ty!( - u8x8[u8;8]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); -simd_ty!(u16x4[u16;4]: x0, x1, x2, x3); -simd_ty!(u32x2[u32;2]: x0, x1); -simd_ty!(u64x1[u64;1]: x1); +pub(crate) type u8x8 = Simd; +pub(crate) type u16x4 = Simd; +pub(crate) type u32x2 = Simd; +pub(crate) type u64x1 = Simd; -simd_ty!( - i8x8[i8;8]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); -simd_ty!(i16x4[i16;4]: x0, x1, x2, x3); -simd_ty!(i32x2[i32;2]: x0, x1); -simd_ty!(i64x1[i64;1]: x1); +pub(crate) type i8x8 = Simd; +pub(crate) type i16x4 = Simd; +pub(crate) type i32x2 = Simd; +pub(crate) type i64x1 = Simd; -simd_ty!(f32x2[f32;2]: x0, x1); -simd_ty!(f64x1[f64;1]: x1); +pub(crate) type f16x4 = Simd; +pub(crate) type f32x2 = Simd; +pub(crate) type f64x1 = Simd; // 128-bit wide types: -simd_ty!( - u8x16[u8;16]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); -simd_ty!( - u16x8[u16;8]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); -simd_ty!(u32x4[u32;4]: x0, x1, x2, x3); -simd_ty!(u64x2[u64;2]: x0, x1); - -simd_ty!( - i8x16[i8;16]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - 
x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); -simd_ty!( - i16x8[i16;8]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); -simd_ty!(i32x4[i32;4]: x0, x1, x2, x3); -simd_ty!(i64x2[i64;2]: x0, x1); +pub(crate) type u8x16 = Simd; +pub(crate) type u16x8 = Simd; +pub(crate) type u32x4 = Simd; +pub(crate) type u64x2 = Simd; -simd_ty!(f16x4[f16;4]: x0, x1, x2, x3); +pub(crate) type i8x16 = Simd; +pub(crate) type i16x8 = Simd; +pub(crate) type i32x4 = Simd; +pub(crate) type i64x2 = Simd; -simd_ty!( - f16x8[f16;8]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); -simd_ty!(f32x4[f32;4]: x0, x1, x2, x3); -simd_ty!(f64x2[f64;2]: x0, x1); +pub(crate) type f16x8 = Simd; +pub(crate) type f32x4 = Simd; +pub(crate) type f64x2 = Simd; pub(crate) type m8x16 = SimdM; pub(crate) type m16x8 = SimdM; @@ -368,171 +365,19 @@ pub(crate) type m64x2 = SimdM; // 256-bit wide types: -simd_ty!( - u8x32[u8;32]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15, - x16, - x17, - x18, - x19, - x20, - x21, - x22, - x23, - x24, - x25, - x26, - x27, - x28, - x29, - x30, - x31 -); -simd_ty!( - u16x16[u16;16]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); -simd_ty!( - u32x8[u32;8]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); -simd_ty!(u64x4[u64;4]: x0, x1, x2, x3); +pub(crate) type u8x32 = Simd; +pub(crate) type u16x16 = Simd; +pub(crate) type u32x8 = Simd; +pub(crate) type u64x4 = Simd; -simd_ty!( - i8x32[i8;32]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15, - x16, - x17, - x18, - x19, - x20, - x21, - x22, - x23, - x24, - x25, - x26, - x27, - x28, - x29, - x30, - x31 -); -simd_ty!( - i16x16[i16;16]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); -simd_ty!( - i32x8[i32;8]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); -simd_ty!(i64x4[i64;4]: x0, x1, x2, x3); 
+pub(crate) type i8x32 = Simd; +pub(crate) type i16x16 = Simd; +pub(crate) type i32x8 = Simd; +pub(crate) type i64x4 = Simd; -simd_ty!( - f16x16[f16;16]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); -simd_ty!( - f32x8[f32;8]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); -simd_ty!(f64x4[f64;4]: x0, x1, x2, x3); +pub(crate) type f16x16 = Simd; +pub(crate) type f32x8 = Simd; +pub(crate) type f64x4 = Simd; pub(crate) type m8x32 = SimdM; pub(crate) type m16x16 = SimdM; @@ -540,483 +385,26 @@ pub(crate) type m32x8 = SimdM; // 512-bit wide types: -simd_ty!( - i8x64[i8;64]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15, - x16, - x17, - x18, - x19, - x20, - x21, - x22, - x23, - x24, - x25, - x26, - x27, - x28, - x29, - x30, - x31, - x32, - x33, - x34, - x35, - x36, - x37, - x38, - x39, - x40, - x41, - x42, - x43, - x44, - x45, - x46, - x47, - x48, - x49, - x50, - x51, - x52, - x53, - x54, - x55, - x56, - x57, - x58, - x59, - x60, - x61, - x62, - x63 -); - -simd_ty!( - u8x64[u8;64]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15, - x16, - x17, - x18, - x19, - x20, - x21, - x22, - x23, - x24, - x25, - x26, - x27, - x28, - x29, - x30, - x31, - x32, - x33, - x34, - x35, - x36, - x37, - x38, - x39, - x40, - x41, - x42, - x43, - x44, - x45, - x46, - x47, - x48, - x49, - x50, - x51, - x52, - x53, - x54, - x55, - x56, - x57, - x58, - x59, - x60, - x61, - x62, - x63 -); - -simd_ty!( - i16x32[i16;32]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15, - x16, - x17, - x18, - x19, - x20, - x21, - x22, - x23, - x24, - x25, - x26, - x27, - x28, - x29, - x30, - x31 -); - -simd_ty!( - u16x32[u16;32]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15, - x16, - x17, - x18, - x19, - x20, - x21, - x22, - x23, - x24, - 
x25, - x26, - x27, - x28, - x29, - x30, - x31 -); +pub(crate) type u8x64 = Simd; +pub(crate) type u16x32 = Simd; +pub(crate) type u32x16 = Simd; +pub(crate) type u64x8 = Simd; -simd_ty!( - i32x16[i32;16]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); +pub(crate) type i8x64 = Simd; +pub(crate) type i16x32 = Simd; +pub(crate) type i32x16 = Simd; +pub(crate) type i64x8 = Simd; -simd_ty!( - u32x16[u32;16]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); +pub(crate) type f16x32 = Simd; +pub(crate) type f32x16 = Simd; +pub(crate) type f64x8 = Simd; -simd_ty!( - f16x32[f16;32]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15, - x16, - x17, - x18, - x19, - x20, - x21, - x22, - x23, - x24, - x25, - x26, - x27, - x28, - x29, - x30, - x31 -); -simd_ty!( - f32x16[f32;16]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); - -simd_ty!( - i64x8[i64;8]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); - -simd_ty!( - u64x8[u64;8]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); +// 1024-bit wide types: -simd_ty!( - f64x8[f64;8]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); +pub(crate) type u16x64 = Simd; +pub(crate) type u32x32 = Simd; -// 1024-bit wide types: -simd_ty!( - u16x64[u16;64]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15, - x16, - x17, - x18, - x19, - x20, - x21, - x22, - x23, - x24, - x25, - x26, - x27, - x28, - x29, - x30, - x31, - x32, - x33, - x34, - x35, - x36, - x37, - x38, - x39, - x40, - x41, - x42, - x43, - x44, - x45, - x46, - x47, - x48, - x49, - x50, - x51, - x52, - x53, - x54, - x55, - x56, - x57, - x58, - x59, - x60, - x61, - x62, - x63 -); -simd_ty!( - i32x32[i32;32]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15, - 
x16, - x17, - x18, - x19, - x20, - x21, - x22, - x23, - x24, - x25, - x26, - x27, - x28, - x29, - x30, - x31 -); -simd_ty!( - u32x32[u32;32]: - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15, - x16, - x17, - x18, - x19, - x20, - x21, - x22, - x23, - x24, - x25, - x26, - x27, - x28, - x29, - x30, - x31 -); +pub(crate) type i32x32 = Simd; /// Used to continue `Debug`ging SIMD types as `MySimd(1, 2, 3, 4)`, as they /// were before moving to array-based simd. diff --git a/library/stdarch/crates/core_arch/src/x86/avx512f.rs b/library/stdarch/crates/core_arch/src/x86/avx512f.rs index 3730496e1ec34..0c725402a9176 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512f.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512f.rs @@ -16807,12 +16807,12 @@ pub const fn _mm512_set_epi8( e0: i8, ) -> __m512i { unsafe { - let r = i8x64::new( + let r = i8x64::from_array([ e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63, - ); + ]); transmute(r) } } From c43636ea4b32bf5fd688897902ba972099d985a6 Mon Sep 17 00:00:00 2001 From: WANG Rui Date: Thu, 9 Apr 2026 20:02:25 +0800 Subject: [PATCH 26/64] loongarch: Avoid constant folding in tests to ensure SIMD coverage Use `black_box` on SIMD intrinsic inputs to prevent the compiler from constant folding SIMD operations, ensuring the corresponding SIMD instructions are actually emitted and covered by tests. 
--- .../core_arch/src/loongarch64/lasx/tests.rs | 4181 ++++++++++++++--- .../core_arch/src/loongarch64/lsx/tests.rs | 3848 ++++++++++++--- .../crates/stdarch-gen-loongarch/src/main.rs | 29 +- 3 files changed, 6536 insertions(+), 1522 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/tests.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/tests.rs index 319ce7cf98195..bd22d25771948 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lasx/tests.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/tests.rs @@ -5,6 +5,7 @@ use crate::{ core_arch::{loongarch64::*, simd::*}, mem::transmute, }; +use std::hint::black_box; use stdarch_test::simd_test; #[simd_test(enable = "lasx")] @@ -24,7 +25,13 @@ unsafe fn test_lasx_xvsll_b() { 2882304449461665880, ); - assert_eq!(r, transmute(lasx_xvsll_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsll_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -44,7 +51,13 @@ unsafe fn test_lasx_xvsll_h() { 7061899947028838480, ); - assert_eq!(r, transmute(lasx_xvsll_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsll_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -76,7 +89,13 @@ unsafe fn test_lasx_xvsll_w() { 3598939055443673088, ); - assert_eq!(r, transmute(lasx_xvsll_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsll_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -100,7 +119,13 @@ unsafe fn test_lasx_xvsll_d() { -289787284616642560, ); - assert_eq!(r, transmute(lasx_xvsll_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsll_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -116,7 +141,7 @@ unsafe fn test_lasx_xvslli_b() { 5775955139904200724, ); - assert_eq!(r, 
transmute(lasx_xvslli_b::<2>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslli_b::<2>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -132,7 +157,7 @@ unsafe fn test_lasx_xvslli_h() { -9223160928474759168, ); - assert_eq!(r, transmute(lasx_xvslli_h::<14>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslli_h::<14>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -154,7 +179,7 @@ unsafe fn test_lasx_xvslli_w() { -1585267064908546048, ); - assert_eq!(r, transmute(lasx_xvslli_w::<24>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslli_w::<24>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -172,7 +197,7 @@ unsafe fn test_lasx_xvslli_d() { -2305843009213693952, ); - assert_eq!(r, transmute(lasx_xvslli_d::<61>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslli_d::<61>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -192,7 +217,13 @@ unsafe fn test_lasx_xvsra_b() { -505532365968836077, ); - assert_eq!(r, transmute(lasx_xvsra_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsra_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -212,7 +243,13 @@ unsafe fn test_lasx_xvsra_h() { 8725659825471543, ); - assert_eq!(r, transmute(lasx_xvsra_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsra_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -244,7 +281,13 @@ unsafe fn test_lasx_xvsra_w() { -36696200575105, ); - assert_eq!(r, transmute(lasx_xvsra_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsra_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -263,7 +306,13 @@ unsafe fn test_lasx_xvsra_d() { ); let r = i64x4::new(1, -129761412875, -1, 8464978396185); - assert_eq!(r, transmute(lasx_xvsra_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsra_d( + 
black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -279,7 +328,7 @@ unsafe fn test_lasx_xvsrai_b() { -218421283493247239, ); - assert_eq!(r, transmute(lasx_xvsrai_b::<4>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsrai_b::<4>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -290,7 +339,7 @@ unsafe fn test_lasx_xvsrai_h() { ); let r = i64x4::new(-281474976710658, 8589803520, -4295098367, 562941363552256); - assert_eq!(r, transmute(lasx_xvsrai_h::<14>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsrai_h::<14>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -307,7 +356,7 @@ unsafe fn test_lasx_xvsrai_w() { ); let r = i64x4::new(68719476730, -16, 17179869169, -25769803773); - assert_eq!(r, transmute(lasx_xvsrai_w::<27>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsrai_w::<27>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -320,7 +369,7 @@ unsafe fn test_lasx_xvsrai_d() { ); let r = i64x4::new(-2, 2, -6, -8); - assert_eq!(r, transmute(lasx_xvsrai_d::<60>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsrai_d::<60>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -340,7 +389,13 @@ unsafe fn test_lasx_xvsrar_b() { 302862676776648704, ); - assert_eq!(r, transmute(lasx_xvsrar_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrar_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -360,7 +415,13 @@ unsafe fn test_lasx_xvsrar_h() { -2251658079567874, ); - assert_eq!(r, transmute(lasx_xvsrar_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrar_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -392,7 +453,13 @@ unsafe fn test_lasx_xvsrar_w() { -1668156707832192, ); - assert_eq!(r, transmute(lasx_xvsrar_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrar_w( + black_box(transmute(a)), + 
black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -411,7 +478,13 @@ unsafe fn test_lasx_xvsrar_d() { ); let r = i64x4::new(19951225, 505, -1907248091287715676, 362); - assert_eq!(r, transmute(lasx_xvsrar_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrar_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -427,7 +500,7 @@ unsafe fn test_lasx_xvsrari_b() { 790117907428411639, ); - assert_eq!(r, transmute(lasx_xvsrari_b::<3>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsrari_b::<3>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -443,7 +516,7 @@ unsafe fn test_lasx_xvsrari_h() { -24488623625338826, ); - assert_eq!(r, transmute(lasx_xvsrari_h::<8>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsrari_h::<8>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -460,7 +533,7 @@ unsafe fn test_lasx_xvsrari_w() { ); let r = i64x4::new(-1, 4294967294, -2, -1); - assert_eq!(r, transmute(lasx_xvsrari_w::<29>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsrari_w::<29>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -473,7 +546,7 @@ unsafe fn test_lasx_xvsrari_d() { ); let r = i64x4::new(-3228, 4782, -4328, -2120); - assert_eq!(r, transmute(lasx_xvsrari_d::<50>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsrari_d::<50>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -493,7 +566,13 @@ unsafe fn test_lasx_xvsrl_b() { 3996105849293766692, ); - assert_eq!(r, transmute(lasx_xvsrl_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrl_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -513,7 +592,13 @@ unsafe fn test_lasx_xvsrl_h() { 12385032119328029, ); - assert_eq!(r, transmute(lasx_xvsrl_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrl_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = 
"lasx")] @@ -540,7 +625,13 @@ unsafe fn test_lasx_xvsrl_w() { ); let r = i64x4::new(3152506611213, 910538585043, 150899, 25769803779); - assert_eq!(r, transmute(lasx_xvsrl_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrl_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -559,7 +650,13 @@ unsafe fn test_lasx_xvsrl_d() { ); let r = i64x4::new(22, 8215, 774027732, 338970735904462); - assert_eq!(r, transmute(lasx_xvsrl_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrl_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -575,7 +672,7 @@ unsafe fn test_lasx_xvsrli_b() { 3694315145030590091, ); - assert_eq!(r, transmute(lasx_xvsrli_b::<0>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsrli_b::<0>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -586,7 +683,7 @@ unsafe fn test_lasx_xvsrli_h() { ); let r = i64x4::new(7036883009470493, 73014771737, 38655688722, 3096241924866048); - assert_eq!(r, transmute(lasx_xvsrli_h::<11>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsrli_h::<11>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -608,7 +705,7 @@ unsafe fn test_lasx_xvsrli_w() { 11669426172998, ); - assert_eq!(r, transmute(lasx_xvsrli_w::<17>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsrli_w::<17>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -621,7 +718,7 @@ unsafe fn test_lasx_xvsrli_d() { ); let r = i64x4::new(16617962184, 1898365962, 5054169972, 27969530398); - assert_eq!(r, transmute(lasx_xvsrli_d::<29>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsrli_d::<29>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -641,7 +738,13 @@ unsafe fn test_lasx_xvsrlr_b() { 150872911094481483, ); - assert_eq!(r, transmute(lasx_xvsrlr_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrlr_b( + black_box(transmute(a)), + 
black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -661,7 +764,13 @@ unsafe fn test_lasx_xvsrlr_h() { 565118914199555, ); - assert_eq!(r, transmute(lasx_xvsrlr_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrlr_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -693,7 +802,13 @@ unsafe fn test_lasx_xvsrlr_w() { 7085854838990307330, ); - assert_eq!(r, transmute(lasx_xvsrlr_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrlr_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -712,7 +827,13 @@ unsafe fn test_lasx_xvsrlr_d() { ); let r = i64x4::new(1801, 481878, 1923591164085, 6280495597); - assert_eq!(r, transmute(lasx_xvsrlr_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrlr_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -728,7 +849,7 @@ unsafe fn test_lasx_xvsrlri_b() { 2893318883870770962, ); - assert_eq!(r, transmute(lasx_xvsrlri_b::<2>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsrlri_b::<2>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -744,7 +865,7 @@ unsafe fn test_lasx_xvsrlri_h() { 32932658182619167, ); - assert_eq!(r, transmute(lasx_xvsrlri_h::<9>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsrlri_h::<9>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -761,7 +882,7 @@ unsafe fn test_lasx_xvsrlri_w() { ); let r = i64x4::new(8589934592, 8589934594, 4294967296, 8589934593); - assert_eq!(r, transmute(lasx_xvsrlri_w::<31>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsrlri_w::<31>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -779,7 +900,7 @@ unsafe fn test_lasx_xvsrlri_d() { 197693428197319479, ); - assert_eq!(r, transmute(lasx_xvsrlri_d::<6>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsrlri_d::<6>(black_box(transmute(a))))); } #[simd_test(enable = 
"lasx")] @@ -799,7 +920,13 @@ unsafe fn test_lasx_xvbitclr_b() { 2031321085346416701, ); - assert_eq!(r, transmute(lasx_xvbitclr_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvbitclr_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -819,7 +946,13 @@ unsafe fn test_lasx_xvbitclr_h() { -8417099780160452424, ); - assert_eq!(r, transmute(lasx_xvbitclr_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvbitclr_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -839,7 +972,13 @@ unsafe fn test_lasx_xvbitclr_w() { 436221668492520778, ); - assert_eq!(r, transmute(lasx_xvbitclr_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvbitclr_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -863,7 +1002,13 @@ unsafe fn test_lasx_xvbitclr_d() { 3668272799860684125, ); - assert_eq!(r, transmute(lasx_xvbitclr_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvbitclr_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -879,7 +1024,7 @@ unsafe fn test_lasx_xvbitclri_b() { 3065582154070828979, ); - assert_eq!(r, transmute(lasx_xvbitclri_b::<6>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvbitclri_b::<6>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -895,7 +1040,7 @@ unsafe fn test_lasx_xvbitclri_h() { 7727381349517352021, ); - assert_eq!(r, transmute(lasx_xvbitclri_h::<1>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvbitclri_h::<1>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -911,7 +1056,10 @@ unsafe fn test_lasx_xvbitclri_w() { -5611395396043530126, ); - assert_eq!(r, transmute(lasx_xvbitclri_w::<30>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvbitclri_w::<30>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -929,7 +1077,10 @@ unsafe fn 
test_lasx_xvbitclri_d() { -63139220754952887, ); - assert_eq!(r, transmute(lasx_xvbitclri_d::<46>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvbitclri_d::<46>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -949,7 +1100,13 @@ unsafe fn test_lasx_xvbitset_b() { -7702318388235109826, ); - assert_eq!(r, transmute(lasx_xvbitset_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvbitset_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -969,7 +1126,13 @@ unsafe fn test_lasx_xvbitset_h() { 1674099372676878223, ); - assert_eq!(r, transmute(lasx_xvbitset_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvbitset_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -989,7 +1152,13 @@ unsafe fn test_lasx_xvbitset_w() { -4953617511697867204, ); - assert_eq!(r, transmute(lasx_xvbitset_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvbitset_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1013,7 +1182,13 @@ unsafe fn test_lasx_xvbitset_d() { 8641001130845153939, ); - assert_eq!(r, transmute(lasx_xvbitset_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvbitset_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1029,7 +1204,7 @@ unsafe fn test_lasx_xvbitseti_b() { -3539275497407339017, ); - assert_eq!(r, transmute(lasx_xvbitseti_b::<7>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvbitseti_b::<7>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1045,7 +1220,10 @@ unsafe fn test_lasx_xvbitseti_h() { -1050847327214912781, ); - assert_eq!(r, transmute(lasx_xvbitseti_h::<13>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvbitseti_h::<13>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -1061,7 +1239,10 @@ unsafe fn 
test_lasx_xvbitseti_w() { -1933536090599238411, ); - assert_eq!(r, transmute(lasx_xvbitseti_w::<29>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvbitseti_w::<29>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -1079,7 +1260,10 @@ unsafe fn test_lasx_xvbitseti_d() { 7640056937583456779, ); - assert_eq!(r, transmute(lasx_xvbitseti_d::<17>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvbitseti_d::<17>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -1099,7 +1283,13 @@ unsafe fn test_lasx_xvbitrev_b() { 8353346322052154032, ); - assert_eq!(r, transmute(lasx_xvbitrev_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvbitrev_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1119,7 +1309,13 @@ unsafe fn test_lasx_xvbitrev_h() { 1161012008856358603, ); - assert_eq!(r, transmute(lasx_xvbitrev_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvbitrev_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1139,7 +1335,13 @@ unsafe fn test_lasx_xvbitrev_w() { 2239715596821320928, ); - assert_eq!(r, transmute(lasx_xvbitrev_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvbitrev_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1163,7 +1365,13 @@ unsafe fn test_lasx_xvbitrev_d() { -7824300689033275105, ); - assert_eq!(r, transmute(lasx_xvbitrev_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvbitrev_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1179,7 +1387,7 @@ unsafe fn test_lasx_xvbitrevi_b() { -468434338938596352, ); - assert_eq!(r, transmute(lasx_xvbitrevi_b::<5>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvbitrevi_b::<5>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1195,7 +1403,10 @@ unsafe fn 
test_lasx_xvbitrevi_h() { 4180481285432101679, ); - assert_eq!(r, transmute(lasx_xvbitrevi_h::<11>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvbitrevi_h::<11>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -1211,7 +1422,10 @@ unsafe fn test_lasx_xvbitrevi_w() { -7201777846932221130, ); - assert_eq!(r, transmute(lasx_xvbitrevi_w::<30>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvbitrevi_w::<30>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -1229,7 +1443,10 @@ unsafe fn test_lasx_xvbitrevi_d() { -1340750007927221124, ); - assert_eq!(r, transmute(lasx_xvbitrevi_d::<25>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvbitrevi_d::<25>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -1249,7 +1466,13 @@ unsafe fn test_lasx_xvadd_b() { 39834845715162790, ); - assert_eq!(r, transmute(lasx_xvadd_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvadd_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1269,7 +1492,13 @@ unsafe fn test_lasx_xvadd_h() { 3485514723534807729, ); - assert_eq!(r, transmute(lasx_xvadd_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvadd_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1301,7 +1530,13 @@ unsafe fn test_lasx_xvadd_w() { 449408456544649458, ); - assert_eq!(r, transmute(lasx_xvadd_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvadd_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1325,7 +1560,13 @@ unsafe fn test_lasx_xvadd_d() { -3333036084724254699, ); - assert_eq!(r, transmute(lasx_xvadd_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvadd_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1341,7 +1582,7 @@ unsafe fn test_lasx_xvaddi_bu() { 1765491911008659808, 
); - assert_eq!(r, transmute(lasx_xvaddi_bu::<3>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvaddi_bu::<3>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1357,7 +1598,7 @@ unsafe fn test_lasx_xvaddi_hu() { 4257614802810591100, ); - assert_eq!(r, transmute(lasx_xvaddi_hu::<1>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvaddi_hu::<1>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1379,7 +1620,7 @@ unsafe fn test_lasx_xvaddi_wu() { 8831113348648816385, ); - assert_eq!(r, transmute(lasx_xvaddi_wu::<18>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvaddi_wu::<18>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1397,7 +1638,7 @@ unsafe fn test_lasx_xvaddi_du() { -4546559236496052074, ); - assert_eq!(r, transmute(lasx_xvaddi_du::<24>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvaddi_du::<24>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1417,7 +1658,13 @@ unsafe fn test_lasx_xvsub_b() { -7947080804470620196, ); - assert_eq!(r, transmute(lasx_xvsub_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsub_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1437,7 +1684,13 @@ unsafe fn test_lasx_xvsub_h() { -2694318201466204009, ); - assert_eq!(r, transmute(lasx_xvsub_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsub_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1469,7 +1722,13 @@ unsafe fn test_lasx_xvsub_w() { -4928352995773315889, ); - assert_eq!(r, transmute(lasx_xvsub_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsub_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1493,7 +1752,13 @@ unsafe fn test_lasx_xvsub_d() { -1297126209654251318, ); - assert_eq!(r, transmute(lasx_xvsub_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsub_d( + 
black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1509,7 +1774,7 @@ unsafe fn test_lasx_xvsubi_bu() { 6185872108420092159, ); - assert_eq!(r, transmute(lasx_xvsubi_bu::<13>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsubi_bu::<13>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1525,7 +1790,7 @@ unsafe fn test_lasx_xvsubi_hu() { 1522443898558080492, ); - assert_eq!(r, transmute(lasx_xvsubi_hu::<7>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsubi_hu::<7>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1547,7 +1812,7 @@ unsafe fn test_lasx_xvsubi_wu() { 1285045436848317605, ); - assert_eq!(r, transmute(lasx_xvsubi_wu::<26>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsubi_wu::<26>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1565,7 +1830,7 @@ unsafe fn test_lasx_xvsubi_du() { 4145748346670499010, ); - assert_eq!(r, transmute(lasx_xvsubi_du::<12>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsubi_du::<12>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1585,7 +1850,13 @@ unsafe fn test_lasx_xvmax_b() { 8535488153625188193, ); - assert_eq!(r, transmute(lasx_xvmax_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmax_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1605,7 +1876,13 @@ unsafe fn test_lasx_xvmax_h() { -4332902052436023459, ); - assert_eq!(r, transmute(lasx_xvmax_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmax_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1637,7 +1914,13 @@ unsafe fn test_lasx_xvmax_w() { 6702174376295843649, ); - assert_eq!(r, transmute(lasx_xvmax_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmax_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1661,7 +1944,13 @@ unsafe fn 
test_lasx_xvmax_d() { -880822478913123851, ); - assert_eq!(r, transmute(lasx_xvmax_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmax_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1677,7 +1966,7 @@ unsafe fn test_lasx_xvmaxi_b() { 5914634738497113077, ); - assert_eq!(r, transmute(lasx_xvmaxi_b::<-11>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmaxi_b::<-11>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1693,7 +1982,7 @@ unsafe fn test_lasx_xvmaxi_h() { 4406209242478280693, ); - assert_eq!(r, transmute(lasx_xvmaxi_h::<-11>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmaxi_h::<-11>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1715,7 +2004,7 @@ unsafe fn test_lasx_xvmaxi_w() { 22981864337, ); - assert_eq!(r, transmute(lasx_xvmaxi_w::<5>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmaxi_w::<5>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1733,7 +2022,7 @@ unsafe fn test_lasx_xvmaxi_d() { 2429249725865673045, ); - assert_eq!(r, transmute(lasx_xvmaxi_d::<-3>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmaxi_d::<-3>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1753,7 +2042,13 @@ unsafe fn test_lasx_xvmax_bu() { 4233495576175936231, ); - assert_eq!(r, transmute(lasx_xvmax_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmax_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1773,7 +2068,13 @@ unsafe fn test_lasx_xvmax_hu() { -1573457187787184228, ); - assert_eq!(r, transmute(lasx_xvmax_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmax_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1793,7 +2094,13 @@ unsafe fn test_lasx_xvmax_wu() { -7315994376096540525, ); - assert_eq!(r, transmute(lasx_xvmax_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + 
transmute(lasx_xvmax_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1817,7 +2124,13 @@ unsafe fn test_lasx_xvmax_du() { 5141420152487342561, ); - assert_eq!(r, transmute(lasx_xvmax_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmax_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1833,7 +2146,7 @@ unsafe fn test_lasx_xvmaxi_bu() { -8478920119441971628, ); - assert_eq!(r, transmute(lasx_xvmaxi_bu::<10>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmaxi_bu::<10>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1849,7 +2162,7 @@ unsafe fn test_lasx_xvmaxi_hu() { 2580949584734723198, ); - assert_eq!(r, transmute(lasx_xvmaxi_hu::<15>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmaxi_hu::<15>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1865,7 +2178,7 @@ unsafe fn test_lasx_xvmaxi_wu() { 6328395255824707620, ); - assert_eq!(r, transmute(lasx_xvmaxi_wu::<12>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmaxi_wu::<12>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1883,7 +2196,7 @@ unsafe fn test_lasx_xvmaxi_du() { 3280369825537805033, ); - assert_eq!(r, transmute(lasx_xvmaxi_du::<18>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmaxi_du::<18>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -1903,7 +2216,13 @@ unsafe fn test_lasx_xvmin_b() { -433018640497265418, ); - assert_eq!(r, transmute(lasx_xvmin_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmin_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1923,7 +2242,13 @@ unsafe fn test_lasx_xvmin_h() { -1753422264687927210, ); - assert_eq!(r, transmute(lasx_xvmin_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmin_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ 
-1949,7 +2274,13 @@ unsafe fn test_lasx_xvmin_w() { -710046880263550629, ); - assert_eq!(r, transmute(lasx_xvmin_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmin_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1973,7 +2304,13 @@ unsafe fn test_lasx_xvmin_d() { -3792381296290037631, ); - assert_eq!(r, transmute(lasx_xvmin_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmin_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -1989,7 +2326,7 @@ unsafe fn test_lasx_xvmini_b() { -1088282380739546975, ); - assert_eq!(r, transmute(lasx_xvmini_b::<-16>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmini_b::<-16>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2005,7 +2342,7 @@ unsafe fn test_lasx_xvmini_h() { 2439077560844296, ); - assert_eq!(r, transmute(lasx_xvmini_h::<8>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmini_h::<8>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2027,7 +2364,7 @@ unsafe fn test_lasx_xvmini_w() { -3162971646443594334, ); - assert_eq!(r, transmute(lasx_xvmini_w::<-16>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmini_w::<-16>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2040,7 +2377,7 @@ unsafe fn test_lasx_xvmini_d() { ); let r = i64x4::new(-8, -8, -8, -8); - assert_eq!(r, transmute(lasx_xvmini_d::<-8>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmini_d::<-8>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2060,7 +2397,13 @@ unsafe fn test_lasx_xvmin_bu() { 481055128827070653, ); - assert_eq!(r, transmute(lasx_xvmin_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmin_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2080,7 +2423,13 @@ unsafe fn test_lasx_xvmin_hu() { 4690886800975071114, ); - assert_eq!(r, 
transmute(lasx_xvmin_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmin_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2100,7 +2449,13 @@ unsafe fn test_lasx_xvmin_wu() { 841320412252129092, ); - assert_eq!(r, transmute(lasx_xvmin_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmin_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2124,7 +2479,13 @@ unsafe fn test_lasx_xvmin_du() { 168959420679376173, ); - assert_eq!(r, transmute(lasx_xvmin_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmin_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2140,7 +2501,7 @@ unsafe fn test_lasx_xvmini_bu() { 1803156197610166553, ); - assert_eq!(r, transmute(lasx_xvmini_bu::<25>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmini_bu::<25>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2156,7 +2517,7 @@ unsafe fn test_lasx_xvmini_hu() { 7881419608817692, ); - assert_eq!(r, transmute(lasx_xvmini_hu::<28>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmini_hu::<28>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2167,7 +2528,7 @@ unsafe fn test_lasx_xvmini_wu() { ); let r = i64x4::new(94489280534, 94489280534, 94489280534, 94489280534); - assert_eq!(r, transmute(lasx_xvmini_wu::<22>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmini_wu::<22>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2180,7 +2541,7 @@ unsafe fn test_lasx_xvmini_du() { ); let r = i64x4::new(18, 18, 18, 18); - assert_eq!(r, transmute(lasx_xvmini_du::<18>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmini_du::<18>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2195,7 +2556,13 @@ unsafe fn test_lasx_xvseq_b() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvseq_b(transmute(a), 
transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvseq_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2210,7 +2577,13 @@ unsafe fn test_lasx_xvseq_h() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvseq_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvseq_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2237,7 +2610,13 @@ unsafe fn test_lasx_xvseq_w() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvseq_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvseq_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2256,7 +2635,13 @@ unsafe fn test_lasx_xvseq_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvseq_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvseq_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2267,7 +2652,7 @@ unsafe fn test_lasx_xvseqi_b() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvseqi_b::<-14>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvseqi_b::<-14>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2278,7 +2663,7 @@ unsafe fn test_lasx_xvseqi_h() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvseqi_h::<-8>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvseqi_h::<-8>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2295,7 +2680,7 @@ unsafe fn test_lasx_xvseqi_w() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvseqi_w::<-11>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvseqi_w::<-11>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2308,7 +2693,7 @@ unsafe fn test_lasx_xvseqi_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvseqi_d::<-2>(transmute(a)))); + 
assert_eq!(r, transmute(lasx_xvseqi_d::<-2>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2328,7 +2713,13 @@ unsafe fn test_lasx_xvslt_b() { 71776119077994495, ); - assert_eq!(r, transmute(lasx_xvslt_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvslt_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2348,7 +2739,13 @@ unsafe fn test_lasx_xvslt_h() { -281470681743361, ); - assert_eq!(r, transmute(lasx_xvslt_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvslt_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2375,7 +2772,13 @@ unsafe fn test_lasx_xvslt_w() { ); let r = i64x4::new(4294967295, 0, -1, 0); - assert_eq!(r, transmute(lasx_xvslt_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvslt_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2394,7 +2797,13 @@ unsafe fn test_lasx_xvslt_d() { ); let r = i64x4::new(0, 0, -1, 0); - assert_eq!(r, transmute(lasx_xvslt_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvslt_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2410,7 +2819,7 @@ unsafe fn test_lasx_xvslti_b() { 71777218556067840, ); - assert_eq!(r, transmute(lasx_xvslti_b::<-16>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslti_b::<-16>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2421,7 +2830,7 @@ unsafe fn test_lasx_xvslti_h() { ); let r = i64x4::new(4294967295, -1, -281470681743361, 65535); - assert_eq!(r, transmute(lasx_xvslti_h::<-4>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslti_h::<-4>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2438,7 +2847,7 @@ unsafe fn test_lasx_xvslti_w() { ); let r = i64x4::new(-1, 0, -4294967296, -1); - assert_eq!(r, transmute(lasx_xvslti_w::<-4>(transmute(a)))); + 
assert_eq!(r, transmute(lasx_xvslti_w::<-4>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2451,7 +2860,7 @@ unsafe fn test_lasx_xvslti_d() { ); let r = i64x4::new(-1, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvslti_d::<1>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslti_d::<1>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2466,7 +2875,13 @@ unsafe fn test_lasx_xvslt_bu() { ); let r = i64x4::new(-1095216660481, 280375465083135, -1099494915841, 16711680); - assert_eq!(r, transmute(lasx_xvslt_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvslt_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2481,7 +2896,13 @@ unsafe fn test_lasx_xvslt_hu() { ); let r = i64x4::new(-281470681808896, 4294901760, -65536, 281470681808895); - assert_eq!(r, transmute(lasx_xvslt_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvslt_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2496,7 +2917,13 @@ unsafe fn test_lasx_xvslt_wu() { ); let r = i64x4::new(-1, -1, -4294967296, -1); - assert_eq!(r, transmute(lasx_xvslt_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvslt_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2515,7 +2942,13 @@ unsafe fn test_lasx_xvslt_du() { ); let r = i64x4::new(-1, -1, 0, -1); - assert_eq!(r, transmute(lasx_xvslt_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvslt_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2526,7 +2959,7 @@ unsafe fn test_lasx_xvslti_bu() { ); let r = i64x4::new(16711680, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvslti_bu::<7>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslti_bu::<7>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2537,7 +2970,7 @@ unsafe fn 
test_lasx_xvslti_hu() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvslti_hu::<13>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslti_hu::<13>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2548,7 +2981,7 @@ unsafe fn test_lasx_xvslti_wu() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvslti_wu::<8>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslti_wu::<8>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2561,7 +2994,7 @@ unsafe fn test_lasx_xvslti_du() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvslti_du::<2>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslti_du::<2>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2581,7 +3014,13 @@ unsafe fn test_lasx_xvsle_b() { 1095216726015, ); - assert_eq!(r, transmute(lasx_xvsle_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsle_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2596,7 +3035,13 @@ unsafe fn test_lasx_xvsle_h() { ); let r = i64x4::new(-1, 4294901760, 4294901760, -281470681743361); - assert_eq!(r, transmute(lasx_xvsle_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsle_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2623,7 +3068,13 @@ unsafe fn test_lasx_xvsle_w() { ); let r = i64x4::new(-4294967296, 0, -1, -4294967296); - assert_eq!(r, transmute(lasx_xvsle_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsle_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2642,7 +3093,13 @@ unsafe fn test_lasx_xvsle_d() { ); let r = i64x4::new(-1, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvsle_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsle_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2658,7 
+3115,7 @@ unsafe fn test_lasx_xvslei_b() { 280375465148415, ); - assert_eq!(r, transmute(lasx_xvslei_b::<-14>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslei_b::<-14>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2669,7 +3126,7 @@ unsafe fn test_lasx_xvslei_h() { ); let r = i64x4::new(-65536, -4294901761, 281474976710655, -65536); - assert_eq!(r, transmute(lasx_xvslei_h::<-15>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslei_h::<-15>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2680,7 +3137,7 @@ unsafe fn test_lasx_xvslei_w() { ); let r = i64x4::new(-4294967296, 0, -1, 0); - assert_eq!(r, transmute(lasx_xvslei_w::<-3>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslei_w::<-3>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2693,7 +3150,7 @@ unsafe fn test_lasx_xvslei_d() { ); let r = i64x4::new(-1, 0, -1, -1); - assert_eq!(r, transmute(lasx_xvslei_d::<6>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslei_d::<6>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2713,7 +3170,13 @@ unsafe fn test_lasx_xvsle_bu() { 281474976710655, ); - assert_eq!(r, transmute(lasx_xvsle_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsle_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2728,7 +3191,13 @@ unsafe fn test_lasx_xvsle_hu() { ); let r = i64x4::new(281474976645120, -4294967296, 281470681808895, 0); - assert_eq!(r, transmute(lasx_xvsle_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsle_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2743,7 +3212,13 @@ unsafe fn test_lasx_xvsle_wu() { ); let r = i64x4::new(-4294967296, -1, 0, 0); - assert_eq!(r, transmute(lasx_xvsle_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsle_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = 
"lasx")] @@ -2762,7 +3237,13 @@ unsafe fn test_lasx_xvsle_du() { ); let r = i64x4::new(0, -1, 0, -1); - assert_eq!(r, transmute(lasx_xvsle_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsle_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2773,7 +3254,7 @@ unsafe fn test_lasx_xvslei_bu() { ); let r = i64x4::new(72056494526365440, 280375465082880, 71776119077928960, 0); - assert_eq!(r, transmute(lasx_xvslei_bu::<29>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslei_bu::<29>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2784,7 +3265,7 @@ unsafe fn test_lasx_xvslei_hu() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvslei_hu::<30>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslei_hu::<30>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2795,7 +3276,7 @@ unsafe fn test_lasx_xvslei_wu() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvslei_wu::<31>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslei_wu::<31>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2808,7 +3289,7 @@ unsafe fn test_lasx_xvslei_du() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvslei_du::<5>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvslei_du::<5>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2824,7 +3305,7 @@ unsafe fn test_lasx_xvsat_b() { 1985954429852520914, ); - assert_eq!(r, transmute(lasx_xvsat_b::<7>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsat_b::<7>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2840,7 +3321,7 @@ unsafe fn test_lasx_xvsat_h() { 1152903912689234618, ); - assert_eq!(r, transmute(lasx_xvsat_h::<12>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsat_h::<12>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2857,7 +3338,7 @@ unsafe fn test_lasx_xvsat_w() { ); let r = i64x4::new(-34359738361, 34359738360, 
-30064771080, -34359738361); - assert_eq!(r, transmute(lasx_xvsat_w::<3>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsat_w::<3>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2875,7 +3356,7 @@ unsafe fn test_lasx_xvsat_d() { 6102033771404793023, ); - assert_eq!(r, transmute(lasx_xvsat_d::<63>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsat_d::<63>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2891,7 +3372,7 @@ unsafe fn test_lasx_xvsat_bu() { 2539795165049929535, ); - assert_eq!(r, transmute(lasx_xvsat_bu::<5>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsat_bu::<5>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2907,7 +3388,7 @@ unsafe fn test_lasx_xvsat_hu() { 1970354902204423, ); - assert_eq!(r, transmute(lasx_xvsat_hu::<2>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsat_hu::<2>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2918,7 +3399,7 @@ unsafe fn test_lasx_xvsat_wu() { ); let r = i64x4::new(270582939711, 270582939711, 270582939711, 270582939711); - assert_eq!(r, transmute(lasx_xvsat_wu::<5>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsat_wu::<5>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2931,7 +3412,7 @@ unsafe fn test_lasx_xvsat_du() { ); let r = i64x4::new(8796093022207, 8796093022207, 8796093022207, 8796093022207); - assert_eq!(r, transmute(lasx_xvsat_du::<42>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvsat_du::<42>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -2951,7 +3432,13 @@ unsafe fn test_lasx_xvadda_b() { -6512388827583513148, ); - assert_eq!(r, transmute(lasx_xvadda_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvadda_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2971,7 +3458,13 @@ unsafe fn test_lasx_xvadda_h() { 4288196905584441792, ); - assert_eq!(r, transmute(lasx_xvadda_h(transmute(a), transmute(b)))); + assert_eq!( + r, 
+ transmute(lasx_xvadda_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -2997,7 +3490,13 @@ unsafe fn test_lasx_xvadda_w() { 7114837115730115925, ); - assert_eq!(r, transmute(lasx_xvadda_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvadda_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3021,7 +3520,13 @@ unsafe fn test_lasx_xvadda_d() { -3532969990801796507, ); - assert_eq!(r, transmute(lasx_xvadda_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvadda_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3041,7 +3546,13 @@ unsafe fn test_lasx_xvsadd_b() { 3530119333939728429, ); - assert_eq!(r, transmute(lasx_xvsadd_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsadd_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3061,7 +3572,13 @@ unsafe fn test_lasx_xvsadd_h() { -5137195089227040637, ); - assert_eq!(r, transmute(lasx_xvsadd_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsadd_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3093,7 +3610,13 @@ unsafe fn test_lasx_xvsadd_w() { 6493388403303310332, ); - assert_eq!(r, transmute(lasx_xvsadd_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsadd_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3117,7 +3640,13 @@ unsafe fn test_lasx_xvsadd_d() { -1670245304326307655, ); - assert_eq!(r, transmute(lasx_xvsadd_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsadd_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3137,7 +3666,13 @@ unsafe fn test_lasx_xvsadd_bu() { -380207497217, ); - assert_eq!(r, 
transmute(lasx_xvsadd_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsadd_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3157,7 +3692,13 @@ unsafe fn test_lasx_xvsadd_hu() { -2766274561, ); - assert_eq!(r, transmute(lasx_xvsadd_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsadd_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3177,7 +3718,13 @@ unsafe fn test_lasx_xvsadd_wu() { 9110967605937569791, ); - assert_eq!(r, transmute(lasx_xvsadd_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsadd_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3196,7 +3743,13 @@ unsafe fn test_lasx_xvsadd_du() { ); let r = i64x4::new(-1, -7683287700352967836, -3264735658191843562, -1); - assert_eq!(r, transmute(lasx_xvsadd_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsadd_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3216,7 +3769,13 @@ unsafe fn test_lasx_xvavg_b() { -2451086284962613015, ); - assert_eq!(r, transmute(lasx_xvavg_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvavg_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3236,7 +3795,13 @@ unsafe fn test_lasx_xvavg_h() { -6082277202109387491, ); - assert_eq!(r, transmute(lasx_xvavg_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvavg_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3268,7 +3833,13 @@ unsafe fn test_lasx_xvavg_w() { -97541447405991454, ); - assert_eq!(r, transmute(lasx_xvavg_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvavg_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ 
-3292,7 +3863,13 @@ unsafe fn test_lasx_xvavg_d() { 743619511763122382, ); - assert_eq!(r, transmute(lasx_xvavg_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvavg_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3312,7 +3889,13 @@ unsafe fn test_lasx_xvavg_bu() { 5794025379951354001, ); - assert_eq!(r, transmute(lasx_xvavg_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvavg_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3332,7 +3915,13 @@ unsafe fn test_lasx_xvavg_hu() { -3939723307751543404, ); - assert_eq!(r, transmute(lasx_xvavg_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvavg_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3351,7 +3940,13 @@ unsafe fn test_lasx_xvavg_wu() { 6180173283312674740, ); - assert_eq!(r, transmute(lasx_xvavg_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvavg_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3375,7 +3970,13 @@ unsafe fn test_lasx_xvavg_du() { -9048945872629561085, ); - assert_eq!(r, transmute(lasx_xvavg_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvavg_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3395,7 +3996,13 @@ unsafe fn test_lasx_xvavgr_b() { -1577916506278329386, ); - assert_eq!(r, transmute(lasx_xvavgr_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvavgr_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3415,7 +4022,13 @@ unsafe fn test_lasx_xvavgr_h() { 1044782302812228671, ); - assert_eq!(r, transmute(lasx_xvavgr_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvavgr_h( + black_box(transmute(a)), + black_box(transmute(b)) + 
)) + ); } #[simd_test(enable = "lasx")] @@ -3447,7 +4060,13 @@ unsafe fn test_lasx_xvavgr_w() { 4983380877656540978, ); - assert_eq!(r, transmute(lasx_xvavgr_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvavgr_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3471,7 +4090,13 @@ unsafe fn test_lasx_xvavgr_d() { 229317404291257478, ); - assert_eq!(r, transmute(lasx_xvavgr_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvavgr_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3491,7 +4116,13 @@ unsafe fn test_lasx_xvavgr_bu() { 8511681618342279077, ); - assert_eq!(r, transmute(lasx_xvavgr_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvavgr_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3511,7 +4142,13 @@ unsafe fn test_lasx_xvavgr_hu() { -4835281559523879916, ); - assert_eq!(r, transmute(lasx_xvavgr_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvavgr_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3531,7 +4168,13 @@ unsafe fn test_lasx_xvavgr_wu() { 2489338192049926342, ); - assert_eq!(r, transmute(lasx_xvavgr_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvavgr_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3555,7 +4198,13 @@ unsafe fn test_lasx_xvavgr_du() { 6414723233875186966, ); - assert_eq!(r, transmute(lasx_xvavgr_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvavgr_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3575,7 +4224,13 @@ unsafe fn test_lasx_xvssub_b() { -4561472970538678093, ); - assert_eq!(r, transmute(lasx_xvssub_b(transmute(a), transmute(b)))); + assert_eq!( + r, + 
transmute(lasx_xvssub_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3595,7 +4250,13 @@ unsafe fn test_lasx_xvssub_h() { 8048307602867637285, ); - assert_eq!(r, transmute(lasx_xvssub_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssub_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3627,7 +4288,13 @@ unsafe fn test_lasx_xvssub_w() { 4655436811119524629, ); - assert_eq!(r, transmute(lasx_xvssub_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssub_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3651,7 +4318,13 @@ unsafe fn test_lasx_xvssub_d() { -9223372036854775808, ); - assert_eq!(r, transmute(lasx_xvssub_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssub_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3671,7 +4344,13 @@ unsafe fn test_lasx_xvssub_bu() { 864691185841012929, ); - assert_eq!(r, transmute(lasx_xvssub_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssub_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3691,7 +4370,13 @@ unsafe fn test_lasx_xvssub_hu() { 188750927758467, ); - assert_eq!(r, transmute(lasx_xvssub_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssub_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3711,7 +4396,13 @@ unsafe fn test_lasx_xvssub_wu() { 3974517532346153551, ); - assert_eq!(r, transmute(lasx_xvssub_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssub_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3730,7 +4421,13 @@ unsafe fn test_lasx_xvssub_du() { ); let r = i64x4::new(1075384133325788465, 0, 
8236940487074099359, 0); - assert_eq!(r, transmute(lasx_xvssub_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssub_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3750,7 +4447,13 @@ unsafe fn test_lasx_xvabsd_b() { 4109603046844106624, ); - assert_eq!(r, transmute(lasx_xvabsd_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvabsd_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3770,7 +4473,13 @@ unsafe fn test_lasx_xvabsd_h() { 5513891007581016946, ); - assert_eq!(r, transmute(lasx_xvabsd_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvabsd_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3802,7 +4511,13 @@ unsafe fn test_lasx_xvabsd_w() { -7014776540975538355, ); - assert_eq!(r, transmute(lasx_xvabsd_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvabsd_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3826,7 +4541,13 @@ unsafe fn test_lasx_xvabsd_d() { 4722306005291245989, ); - assert_eq!(r, transmute(lasx_xvabsd_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvabsd_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3846,7 +4567,13 @@ unsafe fn test_lasx_xvabsd_bu() { 1887319547440621943, ); - assert_eq!(r, transmute(lasx_xvabsd_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvabsd_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3866,7 +4593,13 @@ unsafe fn test_lasx_xvabsd_hu() { 1864011964690965056, ); - assert_eq!(r, transmute(lasx_xvabsd_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvabsd_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] 
@@ -3886,7 +4619,13 @@ unsafe fn test_lasx_xvabsd_wu() { 1525979489064328670, ); - assert_eq!(r, transmute(lasx_xvabsd_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvabsd_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3910,7 +4649,13 @@ unsafe fn test_lasx_xvabsd_du() { 2127486190004927946, ); - assert_eq!(r, transmute(lasx_xvabsd_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvabsd_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3930,7 +4675,13 @@ unsafe fn test_lasx_xvmul_b() { -9159357540886189840, ); - assert_eq!(r, transmute(lasx_xvmul_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmul_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3950,7 +4701,13 @@ unsafe fn test_lasx_xvmul_h() { -7534790044979024262, ); - assert_eq!(r, transmute(lasx_xvmul_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmul_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -3982,7 +4739,13 @@ unsafe fn test_lasx_xvmul_w() { 1142495638330554240, ); - assert_eq!(r, transmute(lasx_xvmul_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmul_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4006,7 +4769,13 @@ unsafe fn test_lasx_xvmul_d() { -3668010491661410128, ); - assert_eq!(r, transmute(lasx_xvmul_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmul_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4032,7 +4801,11 @@ unsafe fn test_lasx_xvmadd_b() { assert_eq!( r, - transmute(lasx_xvmadd_b(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvmadd_b( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + 
)) ); } @@ -4059,7 +4832,11 @@ unsafe fn test_lasx_xvmadd_h() { assert_eq!( r, - transmute(lasx_xvmadd_h(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvmadd_h( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -4104,7 +4881,11 @@ unsafe fn test_lasx_xvmadd_w() { assert_eq!( r, - transmute(lasx_xvmadd_w(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvmadd_w( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -4137,7 +4918,11 @@ unsafe fn test_lasx_xvmadd_d() { assert_eq!( r, - transmute(lasx_xvmadd_d(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvmadd_d( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -4164,7 +4949,11 @@ unsafe fn test_lasx_xvmsub_b() { assert_eq!( r, - transmute(lasx_xvmsub_b(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvmsub_b( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -4191,7 +4980,11 @@ unsafe fn test_lasx_xvmsub_h() { assert_eq!( r, - transmute(lasx_xvmsub_h(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvmsub_h( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -4236,7 +5029,11 @@ unsafe fn test_lasx_xvmsub_w() { assert_eq!( r, - transmute(lasx_xvmsub_w(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvmsub_w( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -4269,7 +5066,11 @@ unsafe fn test_lasx_xvmsub_d() { assert_eq!( r, - transmute(lasx_xvmsub_d(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvmsub_d( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -4285,7 +5086,13 @@ unsafe fn test_lasx_xvdiv_b() { ); let r = i64x4::new(67174400, 843334041468931, 16515072, 1090921824000); - assert_eq!(r, transmute(lasx_xvdiv_b(transmute(a), 
transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvdiv_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4305,7 +5112,13 @@ unsafe fn test_lasx_xvdiv_h() { -281470681939967, ); - assert_eq!(r, transmute(lasx_xvdiv_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvdiv_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4332,7 +5145,13 @@ unsafe fn test_lasx_xvdiv_w() { ); let r = i64x4::new(-25769803778, 4294967295, 34359738365, 1); - assert_eq!(r, transmute(lasx_xvdiv_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvdiv_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4351,7 +5170,13 @@ unsafe fn test_lasx_xvdiv_d() { ); let r = i64x4::new(-3, 0, -3, 0); - assert_eq!(r, transmute(lasx_xvdiv_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvdiv_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4371,7 +5196,13 @@ unsafe fn test_lasx_xvdiv_bu() { 144118486677848127, ); - assert_eq!(r, transmute(lasx_xvdiv_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvdiv_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4386,7 +5217,13 @@ unsafe fn test_lasx_xvdiv_hu() { ); let r = i64x4::new(4295098372, 38654705665, 281474976776212, 283467841601537); - assert_eq!(r, transmute(lasx_xvdiv_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvdiv_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4401,7 +5238,13 @@ unsafe fn test_lasx_xvdiv_wu() { ); let r = i64x4::new(0, 1, 46, 4294967299); - assert_eq!(r, transmute(lasx_xvdiv_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvdiv_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } 
#[simd_test(enable = "lasx")] @@ -4420,7 +5263,13 @@ unsafe fn test_lasx_xvdiv_du() { ); let r = i64x4::new(0, 0, 1, 6); - assert_eq!(r, transmute(lasx_xvdiv_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvdiv_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4440,7 +5289,13 @@ unsafe fn test_lasx_xvhaddw_h_b() { -18859072538017839, ); - assert_eq!(r, transmute(lasx_xvhaddw_h_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvhaddw_h_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4460,7 +5315,13 @@ unsafe fn test_lasx_xvhaddw_w_h() { -36597416302335, ); - assert_eq!(r, transmute(lasx_xvhaddw_w_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvhaddw_w_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4487,7 +5348,13 @@ unsafe fn test_lasx_xvhaddw_d_w() { ); let r = i64x4::new(1043954543, 64421064, -1003667433, -119821715); - assert_eq!(r, transmute(lasx_xvhaddw_d_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvhaddw_d_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4507,7 +5374,13 @@ unsafe fn test_lasx_xvhaddw_hu_bu() { 56014362196705476, ); - assert_eq!(r, transmute(lasx_xvhaddw_hu_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvhaddw_hu_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4527,7 +5400,13 @@ unsafe fn test_lasx_xvhaddw_wu_hu() { 392255068231306, ); - assert_eq!(r, transmute(lasx_xvhaddw_wu_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvhaddw_wu_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4542,7 +5421,13 @@ unsafe fn test_lasx_xvhaddw_du_wu() { ); let r = i64x4::new(2983569336, 4514288382, 
2479696956, 1680431840); - assert_eq!(r, transmute(lasx_xvhaddw_du_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvhaddw_du_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4562,7 +5447,13 @@ unsafe fn test_lasx_xvhsubw_h_b() { -21955597927907350, ); - assert_eq!(r, transmute(lasx_xvhsubw_h_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvhsubw_h_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4582,7 +5473,13 @@ unsafe fn test_lasx_xvhsubw_w_h() { -108800111503156, ); - assert_eq!(r, transmute(lasx_xvhsubw_w_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvhsubw_w_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4603,7 +5500,13 @@ unsafe fn test_lasx_xvhsubw_d_w() { ); let r = i64x4::new(2748898148, -45146293, 958916832, 1285325893); - assert_eq!(r, transmute(lasx_xvhsubw_d_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvhsubw_d_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4623,7 +5526,13 @@ unsafe fn test_lasx_xvhsubw_hu_bu() { 9289103727198239, ); - assert_eq!(r, transmute(lasx_xvhsubw_hu_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvhsubw_hu_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4643,7 +5552,13 @@ unsafe fn test_lasx_xvhsubw_wu_hu() { 32018981198856, ); - assert_eq!(r, transmute(lasx_xvhsubw_wu_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvhsubw_wu_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4658,7 +5573,13 @@ unsafe fn test_lasx_xvhsubw_du_wu() { ); let r = i64x4::new(-1056733131, -2613149992, 384615677, -1588276541); - assert_eq!(r, transmute(lasx_xvhsubw_du_wu(transmute(a), 
transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvhsubw_du_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4678,7 +5599,13 @@ unsafe fn test_lasx_xvmod_b() { -48385121157714142, ); - assert_eq!(r, transmute(lasx_xvmod_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmod_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4698,7 +5625,13 @@ unsafe fn test_lasx_xvmod_h() { -194216204870745003, ); - assert_eq!(r, transmute(lasx_xvmod_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmod_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4723,8 +5656,14 @@ unsafe fn test_lasx_xvmod_w() { -1953977774316925897, 807808928635455307, ); - - assert_eq!(r, transmute(lasx_xvmod_w(transmute(a), transmute(b)))); + + assert_eq!( + r, + transmute(lasx_xvmod_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4748,7 +5687,13 @@ unsafe fn test_lasx_xvmod_d() { -3048989907394276239, ); - assert_eq!(r, transmute(lasx_xvmod_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmod_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4768,7 +5713,13 @@ unsafe fn test_lasx_xvmod_bu() { 5417620637589803790, ); - assert_eq!(r, transmute(lasx_xvmod_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmod_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4788,7 +5739,13 @@ unsafe fn test_lasx_xvmod_hu() { 129490854556368167, ); - assert_eq!(r, transmute(lasx_xvmod_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmod_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4808,7 +5765,13 @@ unsafe fn test_lasx_xvmod_wu() { 
480682694340619302, ); - assert_eq!(r, transmute(lasx_xvmod_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmod_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4832,7 +5795,13 @@ unsafe fn test_lasx_xvmod_du() { 150087784552479859, ); - assert_eq!(r, transmute(lasx_xvmod_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmod_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4848,7 +5817,10 @@ unsafe fn test_lasx_xvrepl128vei_b() { 8970181431921507452, ); - assert_eq!(r, transmute(lasx_xvrepl128vei_b::<8>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvrepl128vei_b::<8>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -4864,7 +5836,10 @@ unsafe fn test_lasx_xvrepl128vei_h() { -3904680457625679409, ); - assert_eq!(r, transmute(lasx_xvrepl128vei_h::<3>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvrepl128vei_h::<3>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -4886,7 +5861,10 @@ unsafe fn test_lasx_xvrepl128vei_w() { -1327396365108239351, ); - assert_eq!(r, transmute(lasx_xvrepl128vei_w::<1>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvrepl128vei_w::<1>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -4904,7 +5882,10 @@ unsafe fn test_lasx_xvrepl128vei_d() { 4427502889722976813, ); - assert_eq!(r, transmute(lasx_xvrepl128vei_d::<0>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvrepl128vei_d::<0>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -4924,7 +5905,13 @@ unsafe fn test_lasx_xvpickev_b() { 4502896606534087725, ); - assert_eq!(r, transmute(lasx_xvpickev_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvpickev_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4944,7 +5931,13 @@ unsafe fn test_lasx_xvpickev_h() { -2117051360895385090, 
); - assert_eq!(r, transmute(lasx_xvpickev_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvpickev_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -4976,7 +5969,13 @@ unsafe fn test_lasx_xvpickev_w() { -4454806063744691677, ); - assert_eq!(r, transmute(lasx_xvpickev_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvpickev_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5000,7 +5999,13 @@ unsafe fn test_lasx_xvpickev_d() { 1952973857169882715, ); - assert_eq!(r, transmute(lasx_xvpickev_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvpickev_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5020,7 +6025,13 @@ unsafe fn test_lasx_xvpickod_b() { 4092165317489988560, ); - assert_eq!(r, transmute(lasx_xvpickod_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvpickod_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5040,7 +6051,13 @@ unsafe fn test_lasx_xvpickod_h() { 5912677724127371711, ); - assert_eq!(r, transmute(lasx_xvpickod_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvpickod_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5072,7 +6089,13 @@ unsafe fn test_lasx_xvpickod_w() { 14200989743342145, ); - assert_eq!(r, transmute(lasx_xvpickod_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvpickod_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5096,7 +6119,13 @@ unsafe fn test_lasx_xvpickod_d() { 3923084493864153244, ); - assert_eq!(r, transmute(lasx_xvpickod_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvpickod_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = 
"lasx")] @@ -5116,7 +6145,13 @@ unsafe fn test_lasx_xvilvh_b() { 6070396101995813657, ); - assert_eq!(r, transmute(lasx_xvilvh_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvilvh_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5136,7 +6171,13 @@ unsafe fn test_lasx_xvilvh_h() { 6944594579025051980, ); - assert_eq!(r, transmute(lasx_xvilvh_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvilvh_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5168,7 +6209,13 @@ unsafe fn test_lasx_xvilvh_w() { 2557948893958412086, ); - assert_eq!(r, transmute(lasx_xvilvh_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvilvh_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5192,7 +6239,13 @@ unsafe fn test_lasx_xvilvh_d() { -1576924492614617443, ); - assert_eq!(r, transmute(lasx_xvilvh_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvilvh_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5212,7 +6265,13 @@ unsafe fn test_lasx_xvilvl_b() { -1661662459983806644, ); - assert_eq!(r, transmute(lasx_xvilvl_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvilvl_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5232,7 +6291,13 @@ unsafe fn test_lasx_xvilvl_h() { -894657396213105965, ); - assert_eq!(r, transmute(lasx_xvilvl_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvilvl_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5264,7 +6329,13 @@ unsafe fn test_lasx_xvilvl_w() { 6940426927105417163, ); - assert_eq!(r, transmute(lasx_xvilvl_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvilvl_w( + black_box(transmute(a)), + 
black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5288,7 +6359,13 @@ unsafe fn test_lasx_xvilvl_d() { -2688716944239585727, ); - assert_eq!(r, transmute(lasx_xvilvl_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvilvl_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5308,7 +6385,13 @@ unsafe fn test_lasx_xvpackev_b() { -9004682544879989266, ); - assert_eq!(r, transmute(lasx_xvpackev_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvpackev_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5328,7 +6411,13 @@ unsafe fn test_lasx_xvpackev_h() { -5280992525495869891, ); - assert_eq!(r, transmute(lasx_xvpackev_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvpackev_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5360,7 +6449,13 @@ unsafe fn test_lasx_xvpackev_w() { 338692385926626324, ); - assert_eq!(r, transmute(lasx_xvpackev_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvpackev_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5384,7 +6479,13 @@ unsafe fn test_lasx_xvpackev_d() { -3601691172781761847, ); - assert_eq!(r, transmute(lasx_xvpackev_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvpackev_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5404,7 +6505,13 @@ unsafe fn test_lasx_xvpackod_b() { 3700670962761760653, ); - assert_eq!(r, transmute(lasx_xvpackod_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvpackod_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5424,7 +6531,13 @@ unsafe fn test_lasx_xvpackod_h() { -5523279134117035742, ); - assert_eq!(r, transmute(lasx_xvpackod_h(transmute(a), 
transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvpackod_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5456,7 +6569,13 @@ unsafe fn test_lasx_xvpackod_w() { -7292079267755798519, ); - assert_eq!(r, transmute(lasx_xvpackod_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvpackod_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5480,7 +6599,13 @@ unsafe fn test_lasx_xvpackod_d() { -8628096693516187272, ); - assert_eq!(r, transmute(lasx_xvpackod_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvpackod_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5506,7 +6631,11 @@ unsafe fn test_lasx_xvshuf_b() { assert_eq!( r, - transmute(lasx_xvshuf_b(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvshuf_b( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5530,7 +6659,11 @@ unsafe fn test_lasx_xvshuf_h() { assert_eq!( r, - transmute(lasx_xvshuf_h(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvshuf_h( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5566,7 +6699,11 @@ unsafe fn test_lasx_xvshuf_w() { assert_eq!( r, - transmute(lasx_xvshuf_w(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvshuf_w( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5594,7 +6731,11 @@ unsafe fn test_lasx_xvshuf_d() { assert_eq!( r, - transmute(lasx_xvshuf_d(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvshuf_d( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5615,7 +6756,13 @@ unsafe fn test_lasx_xvand_v() { -7998109804568426495, ); - assert_eq!(r, transmute(lasx_xvand_v(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvand_v( + 
black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5631,7 +6778,7 @@ unsafe fn test_lasx_xvandi_b() { 793492300495455493, ); - assert_eq!(r, transmute(lasx_xvandi_b::<47>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvandi_b::<47>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -5651,7 +6798,13 @@ unsafe fn test_lasx_xvor_v() { -198266276987019378, ); - assert_eq!(r, transmute(lasx_xvor_v(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvor_v( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5667,7 +6820,7 @@ unsafe fn test_lasx_xvori_b() { 8466485259632311926, ); - assert_eq!(r, transmute(lasx_xvori_b::<116>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvori_b::<116>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -5687,7 +6840,13 @@ unsafe fn test_lasx_xvnor_v() { -8601510250130767824, ); - assert_eq!(r, transmute(lasx_xvnor_v(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvnor_v( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5703,7 +6862,7 @@ unsafe fn test_lasx_xvnori_b() { 6053994920729270286, ); - assert_eq!(r, transmute(lasx_xvnori_b::<161>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvnori_b::<161>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -5723,7 +6882,13 @@ unsafe fn test_lasx_xvxor_v() { 4786489823605581252, ); - assert_eq!(r, transmute(lasx_xvxor_v(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvxor_v( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -5739,7 +6904,7 @@ unsafe fn test_lasx_xvxori_b() { 1979210996964535887, ); - assert_eq!(r, transmute(lasx_xvxori_b::<179>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvxori_b::<179>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -5765,7 +6930,11 @@ unsafe fn 
test_lasx_xvbitsel_v() { assert_eq!( r, - transmute(lasx_xvbitsel_v(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvbitsel_v( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5788,7 +6957,10 @@ unsafe fn test_lasx_xvbitseli_b() { assert_eq!( r, - transmute(lasx_xvbitseli_b::<156>(transmute(a), transmute(b))) + transmute(lasx_xvbitseli_b::<156>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -5805,7 +6977,10 @@ unsafe fn test_lasx_xvshuf4i_b() { 1357573681433480718, ); - assert_eq!(r, transmute(lasx_xvshuf4i_b::<117>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvshuf4i_b::<117>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -5821,7 +6996,10 @@ unsafe fn test_lasx_xvshuf4i_h() { 4406041774853078309, ); - assert_eq!(r, transmute(lasx_xvshuf4i_h::<125>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvshuf4i_h::<125>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -5843,7 +7021,7 @@ unsafe fn test_lasx_xvshuf4i_w() { -206225345846487261, ); - assert_eq!(r, transmute(lasx_xvshuf4i_w::<10>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvshuf4i_w::<10>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -5855,7 +7033,7 @@ unsafe fn test_lasx_xvreplgr2vr_b() { 8463800222054970741, ); - assert_eq!(r, transmute(lasx_xvreplgr2vr_b(-139770763))); + assert_eq!(r, transmute(lasx_xvreplgr2vr_b(black_box(-139770763)))); } #[simd_test(enable = "lasx")] @@ -5867,7 +7045,7 @@ unsafe fn test_lasx_xvreplgr2vr_h() { -1100020993973555013, ); - assert_eq!(r, transmute(lasx_xvreplgr2vr_h(-111546181))); + assert_eq!(r, transmute(lasx_xvreplgr2vr_h(black_box(-111546181)))); } #[simd_test(enable = "lasx")] @@ -5879,7 +7057,7 @@ unsafe fn test_lasx_xvreplgr2vr_w() { -8112237653938959659, ); - assert_eq!(r, transmute(lasx_xvreplgr2vr_w(-1888777515))); + assert_eq!(r, transmute(lasx_xvreplgr2vr_w(black_box(-1888777515)))); } #[simd_test(enable = "lasx")] 
@@ -5891,7 +7069,10 @@ unsafe fn test_lasx_xvreplgr2vr_d() { -1472556476011894783, ); - assert_eq!(r, transmute(lasx_xvreplgr2vr_d(-1472556476011894783))); + assert_eq!( + r, + transmute(lasx_xvreplgr2vr_d(black_box(-1472556476011894783))) + ); } #[simd_test(enable = "lasx")] @@ -5907,7 +7088,7 @@ unsafe fn test_lasx_xvpcnt_b() { 288795538114413315, ); - assert_eq!(r, transmute(lasx_xvpcnt_b(transmute(a)))); + assert_eq!(r, transmute(lasx_xvpcnt_b(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -5923,7 +7104,7 @@ unsafe fn test_lasx_xvpcnt_h() { 2251829878980617, ); - assert_eq!(r, transmute(lasx_xvpcnt_h(transmute(a)))); + assert_eq!(r, transmute(lasx_xvpcnt_h(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -5940,7 +7121,7 @@ unsafe fn test_lasx_xvpcnt_w() { ); let r = i64x4::new(77309411341, 60129542155, 73014444046, 55834574863); - assert_eq!(r, transmute(lasx_xvpcnt_w(transmute(a)))); + assert_eq!(r, transmute(lasx_xvpcnt_w(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -5953,7 +7134,7 @@ unsafe fn test_lasx_xvpcnt_d() { ); let r = i64x4::new(33, 31, 29, 33); - assert_eq!(r, transmute(lasx_xvpcnt_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvpcnt_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -5964,7 +7145,7 @@ unsafe fn test_lasx_xvclo_b() { ); let r = i64x4::new(2207613190657, 8589934592, 1103806726660, 3298568503554); - assert_eq!(r, transmute(lasx_xvclo_b(transmute(a)))); + assert_eq!(r, transmute(lasx_xvclo_b(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -5980,7 +7161,7 @@ unsafe fn test_lasx_xvclo_h() { 281479271677953, ); - assert_eq!(r, transmute(lasx_xvclo_h(transmute(a)))); + assert_eq!(r, transmute(lasx_xvclo_h(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -5997,7 +7178,7 @@ unsafe fn test_lasx_xvclo_w() { ); let r = i64x4::new(4294967299, 1, 1, 8589934593); - assert_eq!(r, transmute(lasx_xvclo_w(transmute(a)))); + assert_eq!(r, 
transmute(lasx_xvclo_w(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6010,7 +7191,7 @@ unsafe fn test_lasx_xvclo_d() { ); let r = i64x4::new(2, 0, 1, 0); - assert_eq!(r, transmute(lasx_xvclo_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvclo_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6021,7 +7202,7 @@ unsafe fn test_lasx_xvclz_b() { ); let r = i64x4::new(65538, 72621643502977024, 216173885920575744, 3302846693380); - assert_eq!(r, transmute(lasx_xvclz_b(transmute(a)))); + assert_eq!(r, transmute(lasx_xvclz_b(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6037,7 +7218,7 @@ unsafe fn test_lasx_xvclz_h() { 17179934721, ); - assert_eq!(r, transmute(lasx_xvclz_h(transmute(a)))); + assert_eq!(r, transmute(lasx_xvclz_h(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6054,7 +7235,7 @@ unsafe fn test_lasx_xvclz_w() { ); let r = i64x4::new(8589934592, 0, 3, 4294967296); - assert_eq!(r, transmute(lasx_xvclz_w(transmute(a)))); + assert_eq!(r, transmute(lasx_xvclz_w(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6067,7 +7248,7 @@ unsafe fn test_lasx_xvclz_d() { ); let r = i64x4::new(0, 0, 0, 1); - assert_eq!(r, transmute(lasx_xvclz_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvclz_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6087,7 +7268,13 @@ unsafe fn test_lasx_xvfadd_s() { 4545553165339792015, ); - assert_eq!(r, transmute(lasx_xvfadd_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfadd_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -6111,7 +7298,13 @@ unsafe fn test_lasx_xvfadd_d() { 4607242424158867483, ); - assert_eq!(r, transmute(lasx_xvfadd_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfadd_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -6131,7 +7324,13 @@ unsafe fn test_lasx_xvfsub_s() { 
-4716328899074058446, ); - assert_eq!(r, transmute(lasx_xvfsub_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfsub_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -6155,7 +7354,13 @@ unsafe fn test_lasx_xvfsub_d() { 4602885236169716939, ); - assert_eq!(r, transmute(lasx_xvfsub_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfsub_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -6175,7 +7380,13 @@ unsafe fn test_lasx_xvfmul_s() { 4412217640780718091, ); - assert_eq!(r, transmute(lasx_xvfmul_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfmul_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -6199,7 +7410,13 @@ unsafe fn test_lasx_xvfmul_d() { 4604645288864682176, ); - assert_eq!(r, transmute(lasx_xvfmul_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfmul_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -6219,7 +7436,13 @@ unsafe fn test_lasx_xvfdiv_s() { 4544549637634302505, ); - assert_eq!(r, transmute(lasx_xvfdiv_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfdiv_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -6243,7 +7466,13 @@ unsafe fn test_lasx_xvfdiv_d() { 4608170208670026319, ); - assert_eq!(r, transmute(lasx_xvfdiv_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfdiv_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -6263,7 +7492,13 @@ unsafe fn test_lasx_xvfcvt_h_s() { 4182498428240214789, ); - assert_eq!(r, transmute(lasx_xvfcvt_h_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcvt_h_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ 
-6287,7 +7522,13 @@ unsafe fn test_lasx_xvfcvt_s_d() { 4509540616169896248, ); - assert_eq!(r, transmute(lasx_xvfcvt_s_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcvt_s_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -6307,7 +7548,13 @@ unsafe fn test_lasx_xvfmin_s() { 4470137692837414470, ); - assert_eq!(r, transmute(lasx_xvfmin_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfmin_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -6331,7 +7578,13 @@ unsafe fn test_lasx_xvfmin_d() { 4596668800324369880, ); - assert_eq!(r, transmute(lasx_xvfmin_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfmin_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -6351,7 +7604,13 @@ unsafe fn test_lasx_xvfmina_s() { 4561809912873379512, ); - assert_eq!(r, transmute(lasx_xvfmina_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfmina_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -6375,7 +7634,13 @@ unsafe fn test_lasx_xvfmina_d() { 4597161583916257152, ); - assert_eq!(r, transmute(lasx_xvfmina_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfmina_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -6395,7 +7660,13 @@ unsafe fn test_lasx_xvfmax_s() { 4574742780979947531, ); - assert_eq!(r, transmute(lasx_xvfmax_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfmax_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -6419,7 +7690,13 @@ unsafe fn test_lasx_xvfmax_d() { 4602928137069840177, ); - assert_eq!(r, transmute(lasx_xvfmax_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfmax_d( + black_box(transmute(a)), + 
black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -6439,7 +7716,13 @@ unsafe fn test_lasx_xvfmaxa_s() { 4527767521076114844, ); - assert_eq!(r, transmute(lasx_xvfmaxa_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfmaxa_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -6463,7 +7746,13 @@ unsafe fn test_lasx_xvfmaxa_d() { 4596362093665607644, ); - assert_eq!(r, transmute(lasx_xvfmaxa_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfmaxa_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -6474,7 +7763,7 @@ unsafe fn test_lasx_xvfclass_s() { ); let r = i64x4::new(549755814016, 549755814016, 549755814016, 549755814016); - assert_eq!(r, transmute(lasx_xvfclass_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfclass_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6487,7 +7776,7 @@ unsafe fn test_lasx_xvfclass_d() { ); let r = i64x4::new(128, 128, 128, 128); - assert_eq!(r, transmute(lasx_xvfclass_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfclass_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6503,7 +7792,7 @@ unsafe fn test_lasx_xvfsqrt_s() { 4566109703441416989, ); - assert_eq!(r, transmute(lasx_xvfsqrt_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfsqrt_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6521,7 +7810,7 @@ unsafe fn test_lasx_xvfsqrt_d() { 4601138545884238765, ); - assert_eq!(r, transmute(lasx_xvfsqrt_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfsqrt_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6537,7 +7826,7 @@ unsafe fn test_lasx_xvfrecip_s() { 4585242601638738136, ); - assert_eq!(r, transmute(lasx_xvfrecip_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrecip_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6555,7 +7844,7 @@ unsafe fn test_lasx_xvfrecip_d() { 
4611482062367896141, ); - assert_eq!(r, transmute(lasx_xvfrecip_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrecip_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx,frecipe")] @@ -6571,7 +7860,7 @@ unsafe fn test_lasx_xvfrecipe_s() { 4728509413412007938, ); - assert_eq!(r, transmute(lasx_xvfrecipe_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrecipe_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx,frecipe")] @@ -6589,7 +7878,7 @@ unsafe fn test_lasx_xvfrecipe_d() { 4611499011256352768, ); - assert_eq!(r, transmute(lasx_xvfrecipe_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrecipe_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx,frecipe")] @@ -6605,7 +7894,7 @@ unsafe fn test_lasx_xvfrsqrte_s() { 4612427253546066334, ); - assert_eq!(r, transmute(lasx_xvfrsqrte_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrsqrte_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx,frecipe")] @@ -6623,7 +7912,7 @@ unsafe fn test_lasx_xvfrsqrte_d() { 4612346183891812352, ); - assert_eq!(r, transmute(lasx_xvfrsqrte_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrsqrte_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6634,7 +7923,7 @@ unsafe fn test_lasx_xvfrint_s() { ); let r = i64x4::new(0, 4575657222473777152, 1065353216, 4575657222473777152); - assert_eq!(r, transmute(lasx_xvfrint_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrint_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6652,7 +7941,7 @@ unsafe fn test_lasx_xvfrint_d() { 0, ); - assert_eq!(r, transmute(lasx_xvfrint_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrint_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6668,7 +7957,7 @@ unsafe fn test_lasx_xvfrsqrt_s() { 4651901116840286347, ); - assert_eq!(r, transmute(lasx_xvfrsqrt_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrsqrt_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6686,7 +7975,7 @@ unsafe fn 
test_lasx_xvfrsqrt_d() { 4612495411087822923, ); - assert_eq!(r, transmute(lasx_xvfrsqrt_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrsqrt_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6702,7 +7991,7 @@ unsafe fn test_lasx_xvflogb_s() { -4575657218195587072, ); - assert_eq!(r, transmute(lasx_xvflogb_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvflogb_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6720,7 +8009,7 @@ unsafe fn test_lasx_xvflogb_d() { -4616189618054758400, ); - assert_eq!(r, transmute(lasx_xvflogb_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvflogb_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6736,7 +8025,7 @@ unsafe fn test_lasx_xvfcvth_s_h() { 4931511963987271680, ); - assert_eq!(r, transmute(lasx_xvfcvth_s_h(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfcvth_s_h(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6752,7 +8041,7 @@ unsafe fn test_lasx_xvfcvth_d_s() { 4605684912954015744, ); - assert_eq!(r, transmute(lasx_xvfcvth_d_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfcvth_d_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6768,7 +8057,7 @@ unsafe fn test_lasx_xvfcvtl_s_h() { 4719033540912152576, ); - assert_eq!(r, transmute(lasx_xvfcvtl_s_h(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfcvtl_s_h(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6784,7 +8073,7 @@ unsafe fn test_lasx_xvfcvtl_d_s() { 4598772185639682048, ); - assert_eq!(r, transmute(lasx_xvfcvtl_d_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfcvtl_d_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6795,7 +8084,7 @@ unsafe fn test_lasx_xvftint_w_s() { ); let r = i64x4::new(0, 0, 1, 0); - assert_eq!(r, transmute(lasx_xvftint_w_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftint_w_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6808,7 +8097,7 @@ unsafe fn test_lasx_xvftint_l_d() { ); let r = 
i64x4::new(0, 0, 1, 1); - assert_eq!(r, transmute(lasx_xvftint_l_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftint_l_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6819,7 +8108,7 @@ unsafe fn test_lasx_xvftint_wu_s() { ); let r = i64x4::new(1, 4294967297, 1, 4294967297); - assert_eq!(r, transmute(lasx_xvftint_wu_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftint_wu_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6832,7 +8121,7 @@ unsafe fn test_lasx_xvftint_lu_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvftint_lu_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftint_lu_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6843,7 +8132,7 @@ unsafe fn test_lasx_xvftintrz_w_s() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvftintrz_w_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrz_w_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6856,7 +8145,7 @@ unsafe fn test_lasx_xvftintrz_l_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvftintrz_l_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrz_l_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6867,7 +8156,7 @@ unsafe fn test_lasx_xvftintrz_wu_s() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvftintrz_wu_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrz_wu_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6880,7 +8169,7 @@ unsafe fn test_lasx_xvftintrz_lu_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvftintrz_lu_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrz_lu_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6902,7 +8191,7 @@ unsafe fn test_lasx_xvffint_s_w() { 5669248528000103797, ); - assert_eq!(r, transmute(lasx_xvffint_s_w(transmute(a)))); + assert_eq!(r, transmute(lasx_xvffint_s_w(black_box(transmute(a))))); } 
#[simd_test(enable = "lasx")] @@ -6920,7 +8209,7 @@ unsafe fn test_lasx_xvffint_d_l() { -4362160337941248997, ); - assert_eq!(r, transmute(lasx_xvffint_d_l(transmute(a)))); + assert_eq!(r, transmute(lasx_xvffint_d_l(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6936,7 +8225,7 @@ unsafe fn test_lasx_xvffint_s_wu() { 5723492283472660471, ); - assert_eq!(r, transmute(lasx_xvffint_s_wu(transmute(a)))); + assert_eq!(r, transmute(lasx_xvffint_s_wu(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6954,7 +8243,7 @@ unsafe fn test_lasx_xvffint_d_lu() { 4892265567869239358, ); - assert_eq!(r, transmute(lasx_xvffint_d_lu(transmute(a)))); + assert_eq!(r, transmute(lasx_xvffint_d_lu(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -6970,7 +8259,7 @@ unsafe fn test_lasx_xvreplve_b() { -5280832617179597130, ); - assert_eq!(r, transmute(lasx_xvreplve_b(transmute(a), 5))); + assert_eq!(r, transmute(lasx_xvreplve_b(black_box(transmute(a)), 5))); } #[simd_test(enable = "lasx")] @@ -6986,7 +8275,7 @@ unsafe fn test_lasx_xvreplve_h() { -8907411554322709406, ); - assert_eq!(r, transmute(lasx_xvreplve_h(transmute(a), -5))); + assert_eq!(r, transmute(lasx_xvreplve_h(black_box(transmute(a)), -5))); } #[simd_test(enable = "lasx")] @@ -7008,7 +8297,7 @@ unsafe fn test_lasx_xvreplve_w() { -2569718735257041300, ); - assert_eq!(r, transmute(lasx_xvreplve_w(transmute(a), 1))); + assert_eq!(r, transmute(lasx_xvreplve_w(black_box(transmute(a)), 1))); } #[simd_test(enable = "lasx")] @@ -7026,7 +8315,7 @@ unsafe fn test_lasx_xvreplve_d() { -7945890434069746992, ); - assert_eq!(r, transmute(lasx_xvreplve_d(transmute(a), -6))); + assert_eq!(r, transmute(lasx_xvreplve_d(black_box(transmute(a)), -6))); } #[simd_test(enable = "lasx")] @@ -7060,7 +8349,10 @@ unsafe fn test_lasx_xvpermi_w() { assert_eq!( r, - transmute(lasx_xvpermi_w::<217>(transmute(a), transmute(b))) + transmute(lasx_xvpermi_w::<217>( + black_box(transmute(a)), + black_box(transmute(b)) + )) 
); } @@ -7081,7 +8373,13 @@ unsafe fn test_lasx_xvandn_v() { 5350223724150917, ); - assert_eq!(r, transmute(lasx_xvandn_v(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvandn_v( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7097,7 +8395,7 @@ unsafe fn test_lasx_xvneg_b() { -5388239603749330053, ); - assert_eq!(r, transmute(lasx_xvneg_b(transmute(a)))); + assert_eq!(r, transmute(lasx_xvneg_b(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -7113,7 +8411,7 @@ unsafe fn test_lasx_xvneg_h() { 5510114370614593991, ); - assert_eq!(r, transmute(lasx_xvneg_h(transmute(a)))); + assert_eq!(r, transmute(lasx_xvneg_h(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -7135,7 +8433,7 @@ unsafe fn test_lasx_xvneg_w() { -6240794077010148150, ); - assert_eq!(r, transmute(lasx_xvneg_w(transmute(a)))); + assert_eq!(r, transmute(lasx_xvneg_w(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -7153,7 +8451,7 @@ unsafe fn test_lasx_xvneg_d() { -906750919774206543, ); - assert_eq!(r, transmute(lasx_xvneg_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvneg_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -7173,7 +8471,13 @@ unsafe fn test_lasx_xvmuh_b() { 131228860074087168, ); - assert_eq!(r, transmute(lasx_xvmuh_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmuh_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7193,7 +8497,13 @@ unsafe fn test_lasx_xvmuh_h() { -14890625691814142, ); - assert_eq!(r, transmute(lasx_xvmuh_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmuh_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7225,7 +8535,13 @@ unsafe fn test_lasx_xvmuh_w() { 15710306989437773, ); - assert_eq!(r, transmute(lasx_xvmuh_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmuh_w( + 
black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7249,7 +8565,13 @@ unsafe fn test_lasx_xvmuh_d() { 273863514955286020, ); - assert_eq!(r, transmute(lasx_xvmuh_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmuh_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7268,8 +8590,14 @@ unsafe fn test_lasx_xvmuh_bu() { 4579080056940291892, 442221464076014683, ); - - assert_eq!(r, transmute(lasx_xvmuh_bu(transmute(a), transmute(b)))); + + assert_eq!( + r, + transmute(lasx_xvmuh_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7289,7 +8617,13 @@ unsafe fn test_lasx_xvmuh_hu() { 108786773599653576, ); - assert_eq!(r, transmute(lasx_xvmuh_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmuh_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7309,7 +8643,13 @@ unsafe fn test_lasx_xvmuh_wu() { 3278999485098399815, ); - assert_eq!(r, transmute(lasx_xvmuh_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmuh_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7333,7 +8673,13 @@ unsafe fn test_lasx_xvmuh_du() { 1569823798457591419, ); - assert_eq!(r, transmute(lasx_xvmuh_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmuh_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7349,7 +8695,10 @@ unsafe fn test_lasx_xvsllwil_h_b() { 283732621893107440, ); - assert_eq!(r, transmute(lasx_xvsllwil_h_b::<4>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvsllwil_h_b::<4>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -7365,7 +8714,10 @@ unsafe fn test_lasx_xvsllwil_w_h() { -19087521822982144, ); - assert_eq!(r, transmute(lasx_xvsllwil_w_h::<11>(transmute(a)))); + 
assert_eq!( + r, + transmute(lasx_xvsllwil_w_h::<11>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -7387,7 +8739,10 @@ unsafe fn test_lasx_xvsllwil_d_w() { -21769464725504, ); - assert_eq!(r, transmute(lasx_xvsllwil_d_w::<14>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvsllwil_d_w::<14>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -7403,7 +8758,10 @@ unsafe fn test_lasx_xvsllwil_hu_bu() { 180156217344131904, ); - assert_eq!(r, transmute(lasx_xvsllwil_hu_bu::<5>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvsllwil_hu_bu::<5>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -7419,7 +8777,10 @@ unsafe fn test_lasx_xvsllwil_wu_hu() { 3493526673607606272, ); - assert_eq!(r, transmute(lasx_xvsllwil_wu_hu::<14>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvsllwil_wu_hu::<14>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -7435,7 +8796,10 @@ unsafe fn test_lasx_xvsllwil_du_wu() { 147522340803051520, ); - assert_eq!(r, transmute(lasx_xvsllwil_du_wu::<28>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvsllwil_du_wu::<28>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -7450,7 +8814,13 @@ unsafe fn test_lasx_xvsran_b_h() { ); let r = i64x4::new(-5107013816536599300, 0, -576745268203292981, 0); - assert_eq!(r, transmute(lasx_xvsran_b_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsran_b_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7477,7 +8847,13 @@ unsafe fn test_lasx_xvsran_h_w() { ); let r = i64x4::new(-7492863874014043255, 0, -5145548381371170633, 0); - assert_eq!(r, transmute(lasx_xvsran_h_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsran_h_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7496,7 +8872,13 @@ unsafe fn test_lasx_xvsran_w_d() { ); let r = i64x4::new(58054624080, 0, 
1863787881113495402, 0); - assert_eq!(r, transmute(lasx_xvsran_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsran_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7511,7 +8893,13 @@ unsafe fn test_lasx_xvssran_b_h() { ); let r = i64x4::new(179865806513864501, 0, -9222296776751415043, 0); - assert_eq!(r, transmute(lasx_xvssran_b_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssran_b_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7538,7 +8926,13 @@ unsafe fn test_lasx_xvssran_h_w() { ); let r = i64x4::new(281015415144451, 0, 281472829161978, 0); - assert_eq!(r, transmute(lasx_xvssran_h_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssran_h_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7557,7 +8951,13 @@ unsafe fn test_lasx_xvssran_w_d() { ); let r = i64x4::new(-109363692856335914, 0, -713658208354305, 0); - assert_eq!(r, transmute(lasx_xvssran_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssran_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7572,7 +8972,13 @@ unsafe fn test_lasx_xvssran_bu_h() { ); let r = i64x4::new(144116287595479055, 0, 71776131929997312, 0); - assert_eq!(r, transmute(lasx_xvssran_bu_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssran_bu_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7587,7 +8993,13 @@ unsafe fn test_lasx_xvssran_hu_w() { ); let r = i64x4::new(254837589540863, 0, 281470681765343, 0); - assert_eq!(r, transmute(lasx_xvssran_hu_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssran_hu_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7606,7 +9018,13 @@ unsafe fn 
test_lasx_xvssran_wu_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvssran_wu_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssran_wu_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7621,7 +9039,13 @@ unsafe fn test_lasx_xvsrarn_b_h() { ); let r = i64x4::new(-7204067930850651184, 0, -5909457163402939758, 0); - assert_eq!(r, transmute(lasx_xvsrarn_b_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrarn_b_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7648,7 +9072,13 @@ unsafe fn test_lasx_xvsrarn_h_w() { ); let r = i64x4::new(4021320339558432771, 0, -5499970420202995712, 0); - assert_eq!(r, transmute(lasx_xvsrarn_h_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrarn_h_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7667,7 +9097,13 @@ unsafe fn test_lasx_xvsrarn_w_d() { ); let r = i64x4::new(-69752906595470, 0, -7240468610764767136, 0); - assert_eq!(r, transmute(lasx_xvsrarn_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrarn_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7682,7 +9118,13 @@ unsafe fn test_lasx_xvssrarn_b_h() { ); let r = i64x4::new(142413695971000447, 0, -141179869986524, 0); - assert_eq!(r, transmute(lasx_xvssrarn_b_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrarn_b_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7709,7 +9151,13 @@ unsafe fn test_lasx_xvssrarn_h_w() { ); let r = i64x4::new(-10414028872220672, 0, 9223104806137135104, 0); - assert_eq!(r, transmute(lasx_xvssrarn_h_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrarn_h_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } 
#[simd_test(enable = "lasx")] @@ -7728,7 +9176,13 @@ unsafe fn test_lasx_xvssrarn_w_d() { ); let r = i64x4::new(2147483648, 0, 326062786704572415, 0); - assert_eq!(r, transmute(lasx_xvssrarn_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrarn_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7743,7 +9197,13 @@ unsafe fn test_lasx_xvssrarn_bu_h() { ); let r = i64x4::new(4286578689, 0, 8163878114427135, 0); - assert_eq!(r, transmute(lasx_xvssrarn_bu_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrarn_bu_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7758,7 +9218,13 @@ unsafe fn test_lasx_xvssrarn_hu_w() { ); let r = i64x4::new(-281474976710656, 0, 2199023255552, 0); - assert_eq!(r, transmute(lasx_xvssrarn_hu_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrarn_hu_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7777,7 +9243,13 @@ unsafe fn test_lasx_xvssrarn_wu_d() { ); let r = i64x4::new(-3539373509, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvssrarn_wu_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrarn_wu_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7792,7 +9264,13 @@ unsafe fn test_lasx_xvsrln_b_h() { ); let r = i64x4::new(776589499955319005, 0, 285495199351976, 0); - assert_eq!(r, transmute(lasx_xvsrln_b_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrln_b_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7819,7 +9297,13 @@ unsafe fn test_lasx_xvsrln_h_w() { ); let r = i64x4::new(-6090306652816735409, 0, -1175228277373752196, 0); - assert_eq!(r, transmute(lasx_xvsrln_h_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrln_h_w( + 
black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7838,7 +9322,13 @@ unsafe fn test_lasx_xvsrln_w_d() { ); let r = i64x4::new(262796920316080678, 0, 1866060245111069, 0); - assert_eq!(r, transmute(lasx_xvsrln_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrln_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7853,7 +9343,13 @@ unsafe fn test_lasx_xvssrln_bu_h() { ); let r = i64x4::new(-996419305685, 0, -71773920038018305, 0); - assert_eq!(r, transmute(lasx_xvssrln_bu_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrln_bu_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7867,7 +9363,13 @@ unsafe fn test_lasx_xvssrln_hu_w() { ); let r = i64x4::new(2319476961249468, 0, 208855326080470286, 0); - assert_eq!(r, transmute(lasx_xvssrln_hu_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrln_hu_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7886,7 +9388,13 @@ unsafe fn test_lasx_xvssrln_wu_d() { ); let r = i64x4::new(-1, 0, -1, 0); - assert_eq!(r, transmute(lasx_xvssrln_wu_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrln_wu_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7901,7 +9409,13 @@ unsafe fn test_lasx_xvsrlrn_b_h() { ); let r = i64x4::new(-6693460433276960310, 0, -6122543899663285619, 0); - assert_eq!(r, transmute(lasx_xvsrlrn_b_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrlrn_b_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7928,7 +9442,13 @@ unsafe fn test_lasx_xvsrlrn_h_w() { ); let r = i64x4::new(390723813551243448, 0, 6015496732136052023, 0); - assert_eq!(r, transmute(lasx_xvsrlrn_h_w(transmute(a), transmute(b)))); + 
assert_eq!( + r, + transmute(lasx_xvsrlrn_h_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7947,7 +9467,13 @@ unsafe fn test_lasx_xvsrlrn_w_d() { ); let r = i64x4::new(4295025675, 0, -3281590872273059757, 0); - assert_eq!(r, transmute(lasx_xvsrlrn_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsrlrn_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7962,7 +9488,13 @@ unsafe fn test_lasx_xvssrlrn_bu_h() { ); let r = i64x4::new(-258385232527491, 0, 4034951496335359804, 0); - assert_eq!(r, transmute(lasx_xvssrlrn_bu_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrlrn_bu_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7977,7 +9509,13 @@ unsafe fn test_lasx_xvssrlrn_hu_w() { ); let r = i64x4::new(-3854303052, 0, -4029743103, 0); - assert_eq!(r, transmute(lasx_xvssrlrn_hu_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrlrn_hu_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -7996,7 +9534,13 @@ unsafe fn test_lasx_xvssrlrn_wu_d() { ); let r = i64x4::new(-3223981555, 0, 35952127557763071, 0); - assert_eq!(r, transmute(lasx_xvssrlrn_wu_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrlrn_wu_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -8018,7 +9562,10 @@ unsafe fn test_lasx_xvfrstpi_b() { assert_eq!( r, - transmute(lasx_xvfrstpi_b::<24>(transmute(a), transmute(b))) + transmute(lasx_xvfrstpi_b::<24>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -8041,7 +9588,10 @@ unsafe fn test_lasx_xvfrstpi_h() { assert_eq!( r, - transmute(lasx_xvfrstpi_h::<10>(transmute(a), transmute(b))) + transmute(lasx_xvfrstpi_h::<10>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -8068,7 
+9618,11 @@ unsafe fn test_lasx_xvfrstp_b() { assert_eq!( r, - transmute(lasx_xvfrstp_b(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvfrstp_b( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -8095,7 +9649,11 @@ unsafe fn test_lasx_xvfrstp_h() { assert_eq!( r, - transmute(lasx_xvfrstp_h(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvfrstp_h( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -8122,7 +9680,10 @@ unsafe fn test_lasx_xvshuf4i_d() { assert_eq!( r, - transmute(lasx_xvshuf4i_d::<115>(transmute(a), transmute(b))) + transmute(lasx_xvshuf4i_d::<115>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -8139,7 +9700,7 @@ unsafe fn test_lasx_xvbsrl_v() { 8842437361645499941, ); - assert_eq!(r, transmute(lasx_xvbsrl_v::<0>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvbsrl_v::<0>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8155,7 +9716,7 @@ unsafe fn test_lasx_xvbsll_v() { 5030360181484275352, ); - assert_eq!(r, transmute(lasx_xvbsll_v::<0>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvbsll_v::<0>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8177,7 +9738,10 @@ unsafe fn test_lasx_xvextrins_b() { assert_eq!( r, - transmute(lasx_xvextrins_b::<69>(transmute(a), transmute(b))) + transmute(lasx_xvextrins_b::<69>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -8200,7 +9764,10 @@ unsafe fn test_lasx_xvextrins_h() { assert_eq!( r, - transmute(lasx_xvextrins_h::<190>(transmute(a), transmute(b))) + transmute(lasx_xvextrins_h::<190>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -8235,7 +9802,10 @@ unsafe fn test_lasx_xvextrins_w() { assert_eq!( r, - transmute(lasx_xvextrins_w::<133>(transmute(a), transmute(b))) + transmute(lasx_xvextrins_w::<133>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -8262,7 +9832,10 @@ unsafe fn 
test_lasx_xvextrins_d() { assert_eq!( r, - transmute(lasx_xvextrins_d::<210>(transmute(a), transmute(b))) + transmute(lasx_xvextrins_d::<210>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -8274,7 +9847,7 @@ unsafe fn test_lasx_xvmskltz_b() { ); let r = i64x4::new(5684, 0, 36244, 0); - assert_eq!(r, transmute(lasx_xvmskltz_b(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmskltz_b(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8285,7 +9858,7 @@ unsafe fn test_lasx_xvmskltz_h() { ); let r = i64x4::new(225, 0, 96, 0); - assert_eq!(r, transmute(lasx_xvmskltz_h(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmskltz_h(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8302,7 +9875,7 @@ unsafe fn test_lasx_xvmskltz_w() { ); let r = i64x4::new(13, 0, 10, 0); - assert_eq!(r, transmute(lasx_xvmskltz_w(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmskltz_w(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8315,7 +9888,7 @@ unsafe fn test_lasx_xvmskltz_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvmskltz_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmskltz_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8335,7 +9908,13 @@ unsafe fn test_lasx_xvsigncov_b() { -6215157037026399088, ); - assert_eq!(r, transmute(lasx_xvsigncov_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsigncov_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -8355,7 +9934,13 @@ unsafe fn test_lasx_xvsigncov_h() { 2866604565619890601, ); - assert_eq!(r, transmute(lasx_xvsigncov_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsigncov_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -8387,7 +9972,13 @@ unsafe fn test_lasx_xvsigncov_w() { -180354238538399451, ); - assert_eq!(r, transmute(lasx_xvsigncov_w(transmute(a), transmute(b)))); + 
assert_eq!( + r, + transmute(lasx_xvsigncov_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -8411,7 +10002,13 @@ unsafe fn test_lasx_xvsigncov_d() { 293290471183495768, ); - assert_eq!(r, transmute(lasx_xvsigncov_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsigncov_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -8437,7 +10034,11 @@ unsafe fn test_lasx_xvfmadd_s() { assert_eq!( r, - transmute(lasx_xvfmadd_s(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvfmadd_s( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -8470,7 +10071,11 @@ unsafe fn test_lasx_xvfmadd_d() { assert_eq!( r, - transmute(lasx_xvfmadd_d(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvfmadd_d( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -8497,7 +10102,11 @@ unsafe fn test_lasx_xvfmsub_s() { assert_eq!( r, - transmute(lasx_xvfmsub_s(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvfmsub_s( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -8530,7 +10139,11 @@ unsafe fn test_lasx_xvfmsub_d() { assert_eq!( r, - transmute(lasx_xvfmsub_d(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvfmsub_d( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -8557,7 +10170,11 @@ unsafe fn test_lasx_xvfnmadd_s() { assert_eq!( r, - transmute(lasx_xvfnmadd_s(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvfnmadd_s( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -8590,7 +10207,11 @@ unsafe fn test_lasx_xvfnmadd_d() { assert_eq!( r, - transmute(lasx_xvfnmadd_d(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvfnmadd_d( + black_box(transmute(a)), + black_box(transmute(b)), + 
black_box(transmute(c)) + )) ); } @@ -8617,7 +10238,11 @@ unsafe fn test_lasx_xvfnmsub_s() { assert_eq!( r, - transmute(lasx_xvfnmsub_s(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvfnmsub_s( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -8650,7 +10275,11 @@ unsafe fn test_lasx_xvfnmsub_d() { assert_eq!( r, - transmute(lasx_xvfnmsub_d(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvfnmsub_d( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -8662,7 +10291,7 @@ unsafe fn test_lasx_xvftintrne_w_s() { ); let r = i64x4::new(1, 0, 1, 4294967297); - assert_eq!(r, transmute(lasx_xvftintrne_w_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrne_w_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8675,7 +10304,7 @@ unsafe fn test_lasx_xvftintrne_l_d() { ); let r = i64x4::new(0, 1, 1, 0); - assert_eq!(r, transmute(lasx_xvftintrne_l_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrne_l_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8686,7 +10315,7 @@ unsafe fn test_lasx_xvftintrp_w_s() { ); let r = i64x4::new(4294967297, 4294967297, 4294967297, 4294967297); - assert_eq!(r, transmute(lasx_xvftintrp_w_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrp_w_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8699,7 +10328,7 @@ unsafe fn test_lasx_xvftintrp_l_d() { ); let r = i64x4::new(1, 1, 1, 1); - assert_eq!(r, transmute(lasx_xvftintrp_l_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrp_l_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8710,7 +10339,7 @@ unsafe fn test_lasx_xvftintrm_w_s() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvftintrm_w_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrm_w_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8723,7 +10352,7 @@ unsafe fn test_lasx_xvftintrm_l_d() { ); let r = 
i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvftintrm_l_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrm_l_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8742,7 +10371,13 @@ unsafe fn test_lasx_xvftint_w_d() { ); let r = i64x4::new(0, 0, 4294967297, 4294967296); - assert_eq!(r, transmute(lasx_xvftint_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvftint_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -8766,7 +10401,13 @@ unsafe fn test_lasx_xvffint_s_l() { -2383622820954443903, ); - assert_eq!(r, transmute(lasx_xvffint_s_l(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvffint_s_l( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -8785,7 +10426,13 @@ unsafe fn test_lasx_xvftintrz_w_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvftintrz_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvftintrz_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -8804,7 +10451,13 @@ unsafe fn test_lasx_xvftintrp_w_d() { ); let r = i64x4::new(4294967297, 4294967297, 4294967297, 4294967297); - assert_eq!(r, transmute(lasx_xvftintrp_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvftintrp_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -8823,7 +10476,13 @@ unsafe fn test_lasx_xvftintrm_w_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvftintrm_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvftintrm_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -8844,7 +10503,10 @@ unsafe fn test_lasx_xvftintrne_w_d() { assert_eq!( r, - transmute(lasx_xvftintrne_w_d(transmute(a), transmute(b))) + transmute(lasx_xvftintrne_w_d( + 
black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -8856,7 +10518,7 @@ unsafe fn test_lasx_xvftinth_l_s() { ); let r = i64x4::new(0, 1, 0, 1); - assert_eq!(r, transmute(lasx_xvftinth_l_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftinth_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8867,7 +10529,7 @@ unsafe fn test_lasx_xvftintl_l_s() { ); let r = i64x4::new(0, 0, 0, 1); - assert_eq!(r, transmute(lasx_xvftintl_l_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintl_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8889,7 +10551,7 @@ unsafe fn test_lasx_xvffinth_d_w() { -4485741486683455488, ); - assert_eq!(r, transmute(lasx_xvffinth_d_w(transmute(a)))); + assert_eq!(r, transmute(lasx_xvffinth_d_w(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8911,7 +10573,7 @@ unsafe fn test_lasx_xvffintl_d_w() { -4489746915386195968, ); - assert_eq!(r, transmute(lasx_xvffintl_d_w(transmute(a)))); + assert_eq!(r, transmute(lasx_xvffintl_d_w(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8922,7 +10584,7 @@ unsafe fn test_lasx_xvftintrzh_l_s() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvftintrzh_l_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrzh_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8933,7 +10595,7 @@ unsafe fn test_lasx_xvftintrzl_l_s() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvftintrzl_l_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrzl_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8944,7 +10606,7 @@ unsafe fn test_lasx_xvftintrph_l_s() { ); let r = i64x4::new(1, 1, 1, 1); - assert_eq!(r, transmute(lasx_xvftintrph_l_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrph_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8955,7 +10617,7 @@ unsafe fn test_lasx_xvftintrpl_l_s() { ); let r = i64x4::new(1, 1, 1, 1); - assert_eq!(r, 
transmute(lasx_xvftintrpl_l_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrpl_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8966,7 +10628,7 @@ unsafe fn test_lasx_xvftintrmh_l_s() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvftintrmh_l_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrmh_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8977,7 +10639,7 @@ unsafe fn test_lasx_xvftintrml_l_s() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvftintrml_l_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrml_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8988,7 +10650,7 @@ unsafe fn test_lasx_xvftintrneh_l_s() { ); let r = i64x4::new(1, 0, 0, 1); - assert_eq!(r, transmute(lasx_xvftintrneh_l_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrneh_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -8999,7 +10661,7 @@ unsafe fn test_lasx_xvftintrnel_l_s() { ); let r = i64x4::new(0, 1, 1, 0); - assert_eq!(r, transmute(lasx_xvftintrnel_l_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvftintrnel_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9015,7 +10677,7 @@ unsafe fn test_lasx_xvfrintrne_s() { 1065353216, ); - assert_eq!(r, transmute(lasx_xvfrintrne_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrintrne_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9028,7 +10690,7 @@ unsafe fn test_lasx_xvfrintrne_d() { ); let r = i64x4::new(0, 0, 4607182418800017408, 0); - assert_eq!(r, transmute(lasx_xvfrintrne_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrintrne_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9039,7 +10701,7 @@ unsafe fn test_lasx_xvfrintrz_s() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfrintrz_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrintrz_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] 
@@ -9052,7 +10714,7 @@ unsafe fn test_lasx_xvfrintrz_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfrintrz_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrintrz_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9068,7 +10730,7 @@ unsafe fn test_lasx_xvfrintrp_s() { 4575657222473777152, ); - assert_eq!(r, transmute(lasx_xvfrintrp_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrintrp_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9086,7 +10748,7 @@ unsafe fn test_lasx_xvfrintrp_d() { 4607182418800017408, ); - assert_eq!(r, transmute(lasx_xvfrintrp_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrintrp_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9097,7 +10759,7 @@ unsafe fn test_lasx_xvfrintrm_s() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfrintrm_s(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrintrm_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9110,7 +10772,7 @@ unsafe fn test_lasx_xvfrintrm_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfrintrm_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvfrintrm_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9146,7 +10808,7 @@ unsafe fn test_lasx_xvst() { -1239470096778490055, ); - lasx_xvst::<0>(transmute(a), o.as_mut_ptr()); + lasx_xvst::<0>(black_box(transmute(a)), o.as_mut_ptr()); assert_eq!(r, transmute(o)); } @@ -9167,7 +10829,7 @@ unsafe fn test_lasx_xvstelm_b() { -1243134694581333281, ); - lasx_xvstelm_b::<0, 9>(transmute(a), o.as_mut_ptr()); + lasx_xvstelm_b::<0, 9>(black_box(transmute(a)), o.as_mut_ptr()); assert_eq!(r, transmute(o)); } @@ -9188,7 +10850,7 @@ unsafe fn test_lasx_xvstelm_h() { 4649151313692342074, ); - lasx_xvstelm_h::<0, 6>(transmute(a), o.as_mut_ptr()); + lasx_xvstelm_h::<0, 6>(black_box(transmute(a)), o.as_mut_ptr()); assert_eq!(r, transmute(o)); } @@ -9215,7 +10877,7 @@ unsafe fn 
test_lasx_xvstelm_w() { 5471549130760739388, ); - lasx_xvstelm_w::<0, 3>(transmute(a), o.as_mut_ptr()); + lasx_xvstelm_w::<0, 3>(black_box(transmute(a)), o.as_mut_ptr()); assert_eq!(r, transmute(o)); } @@ -9238,7 +10900,7 @@ unsafe fn test_lasx_xvstelm_d() { -4006899083251152793, ); - lasx_xvstelm_d::<0, 0>(transmute(a), o.as_mut_ptr()); + lasx_xvstelm_d::<0, 0>(black_box(transmute(a)), o.as_mut_ptr()); assert_eq!(r, transmute(o)); } @@ -9273,7 +10935,10 @@ unsafe fn test_lasx_xvinsve0_w() { assert_eq!( r, - transmute(lasx_xvinsve0_w::<5>(transmute(a), transmute(b))) + transmute(lasx_xvinsve0_w::<5>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -9300,7 +10965,10 @@ unsafe fn test_lasx_xvinsve0_d() { assert_eq!( r, - transmute(lasx_xvinsve0_d::<3>(transmute(a), transmute(b))) + transmute(lasx_xvinsve0_d::<3>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -9318,7 +10986,7 @@ unsafe fn test_lasx_xvpickve_w() { ); let r = i64x4::new(1138467779, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvpickve_w::<2>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvpickve_w::<2>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9331,7 +10999,7 @@ unsafe fn test_lasx_xvpickve_d() { ); let r = i64x4::new(8402618222187512066, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvpickve_d::<0>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvpickve_d::<0>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9346,7 +11014,13 @@ unsafe fn test_lasx_xvssrlrn_b_h() { ); let r = i64x4::new(3463408299017240959, 0, 35748968851799935, 0); - assert_eq!(r, transmute(lasx_xvssrlrn_b_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrlrn_b_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -9373,7 +11047,13 @@ unsafe fn test_lasx_xvssrlrn_h_w() { ); let r = i64x4::new(422210317549567, 0, 11259106657337343, 0); - assert_eq!(r, transmute(lasx_xvssrlrn_h_w(transmute(a), 
transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrlrn_h_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -9392,7 +11072,13 @@ unsafe fn test_lasx_xvssrlrn_w_d() { ); let r = i64x4::new(33428474336875, 0, 9223372034707292159, 0); - assert_eq!(r, transmute(lasx_xvssrlrn_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrlrn_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -9407,7 +11093,13 @@ unsafe fn test_lasx_xvssrln_b_h() { ); let r = i64x4::new(657383790217428863, 0, 941881790371430152, 0); - assert_eq!(r, transmute(lasx_xvssrln_b_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrln_b_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -9434,7 +11126,13 @@ unsafe fn test_lasx_xvssrln_h_w() { ); let r = i64x4::new(9223103287866884105, 0, 1696871892814295669, 0); - assert_eq!(r, transmute(lasx_xvssrln_h_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrln_h_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -9453,7 +11151,13 @@ unsafe fn test_lasx_xvssrln_w_d() { ); let r = i64x4::new(3937140138060021759, 0, 9223372034707292159, 0); - assert_eq!(r, transmute(lasx_xvssrln_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvssrln_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -9473,7 +11177,13 @@ unsafe fn test_lasx_xvorn_v() { -126121887133672977, ); - assert_eq!(r, transmute(lasx_xvorn_v(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvorn_v( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -9521,7 +11231,7 @@ unsafe fn test_lasx_xvstx() { -4162173646616256791, ); - lasx_xvstx(transmute(a), o.as_mut_ptr(), 0); + 
lasx_xvstx(black_box(transmute(a)), o.as_mut_ptr(), 0); assert_eq!(r, transmute(o)); } @@ -9535,7 +11245,7 @@ unsafe fn test_lasx_xvextl_qu_du() { ); let r = i64x4::new(-5083351180651141737, 0, 4121325568380818738, 0); - assert_eq!(r, transmute(lasx_xvextl_qu_du(transmute(a)))); + assert_eq!(r, transmute(lasx_xvextl_qu_du(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9559,7 +11269,7 @@ unsafe fn test_lasx_xvinsgr2vr_w() { assert_eq!( r, - transmute(lasx_xvinsgr2vr_w::<4>(transmute(a), -596457645)) + transmute(lasx_xvinsgr2vr_w::<4>(black_box(transmute(a)), -596457645)) ); } @@ -9580,7 +11290,7 @@ unsafe fn test_lasx_xvinsgr2vr_d() { assert_eq!( r, - transmute(lasx_xvinsgr2vr_d::<3>(transmute(a), -1262509914)) + transmute(lasx_xvinsgr2vr_d::<3>(black_box(transmute(a)), -1262509914)) ); } @@ -9597,7 +11307,7 @@ unsafe fn test_lasx_xvreplve0_b() { 3472328296227680304, ); - assert_eq!(r, transmute(lasx_xvreplve0_b(transmute(a)))); + assert_eq!(r, transmute(lasx_xvreplve0_b(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9613,7 +11323,7 @@ unsafe fn test_lasx_xvreplve0_h() { 115969459958317468, ); - assert_eq!(r, transmute(lasx_xvreplve0_h(transmute(a)))); + assert_eq!(r, transmute(lasx_xvreplve0_h(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9635,7 +11345,7 @@ unsafe fn test_lasx_xvreplve0_w() { 5341799334363128369, ); - assert_eq!(r, transmute(lasx_xvreplve0_w(transmute(a)))); + assert_eq!(r, transmute(lasx_xvreplve0_w(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9653,7 +11363,7 @@ unsafe fn test_lasx_xvreplve0_d() { -7669512117913941619, ); - assert_eq!(r, transmute(lasx_xvreplve0_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvreplve0_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9669,7 +11379,7 @@ unsafe fn test_lasx_xvreplve0_q() { -7451765666000961269, ); - assert_eq!(r, transmute(lasx_xvreplve0_q(transmute(a)))); + assert_eq!(r, 
transmute(lasx_xvreplve0_q(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9685,7 +11395,7 @@ unsafe fn test_lasx_vext2xv_h_b() { 24207148650070059, ); - assert_eq!(r, transmute(lasx_vext2xv_h_b(transmute(a)))); + assert_eq!(r, transmute(lasx_vext2xv_h_b(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9701,7 +11411,7 @@ unsafe fn test_lasx_vext2xv_w_h() { -34359738358622, ); - assert_eq!(r, transmute(lasx_vext2xv_w_h(transmute(a)))); + assert_eq!(r, transmute(lasx_vext2xv_w_h(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9718,7 +11428,7 @@ unsafe fn test_lasx_vext2xv_d_w() { ); let r = i64x4::new(-585251458, -2113345963, -1846838006, -474453663); - assert_eq!(r, transmute(lasx_vext2xv_d_w(transmute(a)))); + assert_eq!(r, transmute(lasx_vext2xv_d_w(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9729,7 +11439,7 @@ unsafe fn test_lasx_vext2xv_w_b() { ); let r = i64x4::new(-240518168540, -528280977282, 30064770965, -489626271740); - assert_eq!(r, transmute(lasx_vext2xv_w_b(transmute(a)))); + assert_eq!(r, transmute(lasx_vext2xv_w_b(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9740,7 +11450,7 @@ unsafe fn test_lasx_vext2xv_d_h() { ); let r = i64x4::new(28568, -25911, 12053, -2728); - assert_eq!(r, transmute(lasx_vext2xv_d_h(transmute(a)))); + assert_eq!(r, transmute(lasx_vext2xv_d_h(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9751,7 +11461,7 @@ unsafe fn test_lasx_vext2xv_d_b() { ); let r = i64x4::new(18, 112, -36, -67); - assert_eq!(r, transmute(lasx_vext2xv_d_b(transmute(a)))); + assert_eq!(r, transmute(lasx_vext2xv_d_b(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9767,7 +11477,7 @@ unsafe fn test_lasx_vext2xv_hu_bu() { 16888898041348298, ); - assert_eq!(r, transmute(lasx_vext2xv_hu_bu(transmute(a)))); + assert_eq!(r, transmute(lasx_vext2xv_hu_bu(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9783,7 +11493,7 @@ unsafe fn 
test_lasx_vext2xv_wu_hu() { 225172250484459, ); - assert_eq!(r, transmute(lasx_vext2xv_wu_hu(transmute(a)))); + assert_eq!(r, transmute(lasx_vext2xv_wu_hu(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9800,7 +11510,7 @@ unsafe fn test_lasx_vext2xv_du_wu() { ); let r = i64x4::new(4027501046, 3358638690, 2495633600, 1035808674); - assert_eq!(r, transmute(lasx_vext2xv_du_wu(transmute(a)))); + assert_eq!(r, transmute(lasx_vext2xv_du_wu(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9811,7 +11521,7 @@ unsafe fn test_lasx_vext2xv_wu_bu() { ); let r = i64x4::new(987842478134, 481036337184, 266287972487, 979252543649); - assert_eq!(r, transmute(lasx_vext2xv_wu_bu(transmute(a)))); + assert_eq!(r, transmute(lasx_vext2xv_wu_bu(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9822,7 +11532,7 @@ unsafe fn test_lasx_vext2xv_du_hu() { ); let r = i64x4::new(61301, 41410, 35355, 19598); - assert_eq!(r, transmute(lasx_vext2xv_du_hu(transmute(a)))); + assert_eq!(r, transmute(lasx_vext2xv_du_hu(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9833,7 +11543,7 @@ unsafe fn test_lasx_vext2xv_du_bu() { ); let r = i64x4::new(69, 25, 36, 204); - assert_eq!(r, transmute(lasx_vext2xv_du_bu(transmute(a)))); + assert_eq!(r, transmute(lasx_vext2xv_du_bu(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9855,7 +11565,10 @@ unsafe fn test_lasx_xvpermi_q() { assert_eq!( r, - transmute(lasx_xvpermi_q::<49>(transmute(a), transmute(b))) + transmute(lasx_xvpermi_q::<49>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -9874,7 +11587,7 @@ unsafe fn test_lasx_xvpermi_d() { 1609032298240495217, ); - assert_eq!(r, transmute(lasx_xvpermi_d::<137>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvpermi_d::<137>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -9900,7 +11613,13 @@ unsafe fn test_lasx_xvperm_w() { -3042141963630030871, ); - assert_eq!(r, transmute(lasx_xvperm_w(transmute(a), 
transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvperm_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -9981,7 +11700,10 @@ unsafe fn test_lasx_xvpickve2gr_w() { ); let r: i32 = 1367768596; - assert_eq!(r, transmute(lasx_xvpickve2gr_w::<4>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvpickve2gr_w::<4>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -9998,7 +11720,10 @@ unsafe fn test_lasx_xvpickve2gr_wu() { ); let r: u32 = 3194994707; - assert_eq!(r, transmute(lasx_xvpickve2gr_wu::<7>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvpickve2gr_wu::<7>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -10011,7 +11736,10 @@ unsafe fn test_lasx_xvpickve2gr_d() { ); let r: i64 = 6739870851682505277; - assert_eq!(r, transmute(lasx_xvpickve2gr_d::<2>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvpickve2gr_d::<2>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -10022,9 +11750,12 @@ unsafe fn test_lasx_xvpickve2gr_du() { -4272175049937479582, -8920910898336101981, ); - let r: u64 = 9525833175373449635; - - assert_eq!(r, transmute(lasx_xvpickve2gr_du::<3>(transmute(a)))); + let r: u64 = 9525833175373449635; + + assert_eq!( + r, + transmute(lasx_xvpickve2gr_du::<3>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -10043,7 +11774,13 @@ unsafe fn test_lasx_xvaddwev_q_d() { ); let r = i64x4::new(-7472750192138786681, -1, -7758725841623301722, -1); - assert_eq!(r, transmute(lasx_xvaddwev_q_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvaddwev_q_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10070,7 +11807,13 @@ unsafe fn test_lasx_xvaddwev_d_w() { ); let r = i64x4::new(614980351, -1946929141, -3309402607, -619077207); - assert_eq!(r, transmute(lasx_xvaddwev_d_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvaddwev_d_w( + 
black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10090,7 +11833,13 @@ unsafe fn test_lasx_xvaddwev_w_h() { -232787227420502, ); - assert_eq!(r, transmute(lasx_xvaddwev_w_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvaddwev_w_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10110,7 +11859,13 @@ unsafe fn test_lasx_xvaddwev_h_b() { -10414449598922739, ); - assert_eq!(r, transmute(lasx_xvaddwev_h_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvaddwev_h_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10129,7 +11884,13 @@ unsafe fn test_lasx_xvaddwev_q_du() { ); let r = i64x4::new(4866121314102936184, 1, 898239984703082844, 1); - assert_eq!(r, transmute(lasx_xvaddwev_q_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvaddwev_q_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10144,7 +11905,13 @@ unsafe fn test_lasx_xvaddwev_d_wu() { ); let r = i64x4::new(4001409528, 3398767892, 6021892971, 4349349069); - assert_eq!(r, transmute(lasx_xvaddwev_d_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvaddwev_d_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10164,7 +11931,13 @@ unsafe fn test_lasx_xvaddwev_w_hu() { 376479653317006, ); - assert_eq!(r, transmute(lasx_xvaddwev_w_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvaddwev_w_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10184,7 +11957,13 @@ unsafe fn test_lasx_xvaddwev_h_bu() { 68962872563859917, ); - assert_eq!(r, transmute(lasx_xvaddwev_h_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvaddwev_h_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } 
#[simd_test(enable = "lasx")] @@ -10203,7 +11982,13 @@ unsafe fn test_lasx_xvsubwev_q_d() { ); let r = i64x4::new(8183582659207736591, -1, 5496584216395980167, -1); - assert_eq!(r, transmute(lasx_xvsubwev_q_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsubwev_q_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10230,7 +12015,13 @@ unsafe fn test_lasx_xvsubwev_d_w() { ); let r = i64x4::new(-1945765730, 1700549847, -1218066002, -827282692); - assert_eq!(r, transmute(lasx_xvsubwev_d_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsubwev_d_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10250,7 +12041,13 @@ unsafe fn test_lasx_xvsubwev_w_h() { 217514323726817, ); - assert_eq!(r, transmute(lasx_xvsubwev_w_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsubwev_w_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10270,7 +12067,13 @@ unsafe fn test_lasx_xvsubwev_h_b() { -5910188531122352, ); - assert_eq!(r, transmute(lasx_xvsubwev_h_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsubwev_h_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10289,7 +12092,13 @@ unsafe fn test_lasx_xvsubwev_q_du() { ); let r = i64x4::new(-7180841769120666233, -1, -3901807980557405007, -1); - assert_eq!(r, transmute(lasx_xvsubwev_q_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsubwev_q_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10304,7 +12113,13 @@ unsafe fn test_lasx_xvsubwev_d_wu() { ); let r = i64x4::new(-2531041484, -1085343469, -1900376905, 1600829569); - assert_eq!(r, transmute(lasx_xvsubwev_d_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsubwev_d_wu( + black_box(transmute(a)), + 
black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10324,7 +12139,13 @@ unsafe fn test_lasx_xvsubwev_w_hu() { -117029268872947, ); - assert_eq!(r, transmute(lasx_xvsubwev_w_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsubwev_w_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10344,7 +12165,13 @@ unsafe fn test_lasx_xvsubwev_h_bu() { -7035942402260810, ); - assert_eq!(r, transmute(lasx_xvsubwev_h_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsubwev_h_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10368,7 +12195,13 @@ unsafe fn test_lasx_xvmulwev_q_d() { -2723954123981949807, ); - assert_eq!(r, transmute(lasx_xvmulwev_q_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmulwev_q_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10400,7 +12233,13 @@ unsafe fn test_lasx_xvmulwev_d_w() { 904288373202150940, ); - assert_eq!(r, transmute(lasx_xvmulwev_d_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmulwev_d_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10420,7 +12259,13 @@ unsafe fn test_lasx_xvmulwev_w_h() { -218736636965849761, ); - assert_eq!(r, transmute(lasx_xvmulwev_w_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmulwev_w_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10440,7 +12285,13 @@ unsafe fn test_lasx_xvmulwev_h_b() { -532018857412992924, ); - assert_eq!(r, transmute(lasx_xvmulwev_h_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmulwev_h_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10464,7 +12315,13 @@ unsafe fn test_lasx_xvmulwev_q_du() { 1973424773030267173, ); - 
assert_eq!(r, transmute(lasx_xvmulwev_q_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmulwev_q_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10484,7 +12341,13 @@ unsafe fn test_lasx_xvmulwev_d_wu() { 312983850752328844, ); - assert_eq!(r, transmute(lasx_xvmulwev_d_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmulwev_d_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10504,7 +12367,13 @@ unsafe fn test_lasx_xvmulwev_w_hu() { -4803214827614038190, ); - assert_eq!(r, transmute(lasx_xvmulwev_w_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmulwev_w_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10524,7 +12393,13 @@ unsafe fn test_lasx_xvmulwev_h_bu() { 4458585836433706972, ); - assert_eq!(r, transmute(lasx_xvmulwev_h_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmulwev_h_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10543,7 +12418,13 @@ unsafe fn test_lasx_xvaddwod_q_d() { ); let r = i64x4::new(-3813723879058076957, 0, 200103109406722390, 0); - assert_eq!(r, transmute(lasx_xvaddwod_q_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvaddwod_q_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10570,7 +12451,13 @@ unsafe fn test_lasx_xvaddwod_d_w() { ); let r = i64x4::new(3142724184, -2585235328, -785720463, 926940003); - assert_eq!(r, transmute(lasx_xvaddwod_d_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvaddwod_d_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10590,7 +12477,13 @@ unsafe fn test_lasx_xvaddwod_w_h() { -148498494282599, ); - assert_eq!(r, transmute(lasx_xvaddwod_w_h(transmute(a), 
transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvaddwod_w_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10610,7 +12503,13 @@ unsafe fn test_lasx_xvaddwod_h_b() { -9570449863999416, ); - assert_eq!(r, transmute(lasx_xvaddwod_h_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvaddwod_h_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10629,7 +12528,13 @@ unsafe fn test_lasx_xvaddwod_q_du() { ); let r = i64x4::new(751645223963476143, 1, -1275901335613508018, 0); - assert_eq!(r, transmute(lasx_xvaddwod_q_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvaddwod_q_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10644,7 +12549,13 @@ unsafe fn test_lasx_xvaddwod_d_wu() { ); let r = i64x4::new(4757884041, 1673456593, 2162927615, 5143136401); - assert_eq!(r, transmute(lasx_xvaddwod_d_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvaddwod_d_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10664,7 +12575,13 @@ unsafe fn test_lasx_xvaddwod_w_hu() { 248416613500221, ); - assert_eq!(r, transmute(lasx_xvaddwod_w_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvaddwod_w_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10684,7 +12601,13 @@ unsafe fn test_lasx_xvaddwod_h_bu() { 83880238860075230, ); - assert_eq!(r, transmute(lasx_xvaddwod_h_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvaddwod_h_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10703,7 +12626,13 @@ unsafe fn test_lasx_xvsubwod_q_d() { ); let r = i64x4::new(1764856097736252489, 0, 7683656878360999333, -1); - assert_eq!(r, transmute(lasx_xvsubwod_q_d(transmute(a), transmute(b)))); 
+ assert_eq!( + r, + transmute(lasx_xvsubwod_q_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10730,7 +12659,13 @@ unsafe fn test_lasx_xvsubwod_d_w() { ); let r = i64x4::new(-959924898, 7572903, 2106559810, 3976421257); - assert_eq!(r, transmute(lasx_xvsubwod_d_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsubwod_d_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10750,7 +12685,13 @@ unsafe fn test_lasx_xvsubwod_w_h() { -17665200524651, ); - assert_eq!(r, transmute(lasx_xvsubwod_w_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsubwod_w_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10770,7 +12711,13 @@ unsafe fn test_lasx_xvsubwod_h_b() { -3939721971105776, ); - assert_eq!(r, transmute(lasx_xvsubwod_h_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsubwod_h_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10789,7 +12736,13 @@ unsafe fn test_lasx_xvsubwod_q_du() { ); let r = i64x4::new(-6069526046627127478, -1, -1804068722113556285, -1); - assert_eq!(r, transmute(lasx_xvsubwod_q_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsubwod_q_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10804,7 +12757,13 @@ unsafe fn test_lasx_xvsubwod_d_wu() { ); let r = i64x4::new(762157671, -772219478, -1655146846, -1402401592); - assert_eq!(r, transmute(lasx_xvsubwod_d_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsubwod_d_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10824,7 +12783,13 @@ unsafe fn test_lasx_xvsubwod_w_hu() { 164866614644743, ); - assert_eq!(r, transmute(lasx_xvsubwod_w_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + 
transmute(lasx_xvsubwod_w_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10844,7 +12809,13 @@ unsafe fn test_lasx_xvsubwod_h_bu() { -280740536975491, ); - assert_eq!(r, transmute(lasx_xvsubwod_h_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsubwod_h_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10868,7 +12839,13 @@ unsafe fn test_lasx_xvmulwod_q_d() { -113061080830775254, ); - assert_eq!(r, transmute(lasx_xvmulwod_q_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmulwod_q_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10900,7 +12877,13 @@ unsafe fn test_lasx_xvmulwod_d_w() { -1334126209007208500, ); - assert_eq!(r, transmute(lasx_xvmulwod_d_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmulwod_d_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10920,7 +12903,13 @@ unsafe fn test_lasx_xvmulwod_w_h() { 337273560374881751, ); - assert_eq!(r, transmute(lasx_xvmulwod_w_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmulwod_w_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10940,7 +12929,13 @@ unsafe fn test_lasx_xvmulwod_h_b() { -797714991416606612, ); - assert_eq!(r, transmute(lasx_xvmulwod_h_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmulwod_h_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10964,7 +12959,13 @@ unsafe fn test_lasx_xvmulwod_q_du() { -6864651532066967840, ); - assert_eq!(r, transmute(lasx_xvmulwod_q_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmulwod_q_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -10984,7 +12985,13 @@ unsafe fn 
test_lasx_xvmulwod_d_wu() { 170736982952013264, ); - assert_eq!(r, transmute(lasx_xvmulwod_d_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmulwod_d_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -11004,7 +13011,13 @@ unsafe fn test_lasx_xvmulwod_w_hu() { 648970298882764352, ); - assert_eq!(r, transmute(lasx_xvmulwod_w_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmulwod_w_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -11024,7 +13037,13 @@ unsafe fn test_lasx_xvmulwod_h_bu() { 861263883582730760, ); - assert_eq!(r, transmute(lasx_xvmulwod_h_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvmulwod_h_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -11047,7 +13066,10 @@ unsafe fn test_lasx_xvaddwev_d_wu_w() { assert_eq!( r, - transmute(lasx_xvaddwev_d_wu_w(transmute(a), transmute(b))) + transmute(lasx_xvaddwev_d_wu_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -11070,7 +13092,10 @@ unsafe fn test_lasx_xvaddwev_w_hu_h() { assert_eq!( r, - transmute(lasx_xvaddwev_w_hu_h(transmute(a), transmute(b))) + transmute(lasx_xvaddwev_w_hu_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -11093,7 +13118,10 @@ unsafe fn test_lasx_xvaddwev_h_bu_b() { assert_eq!( r, - transmute(lasx_xvaddwev_h_bu_b(transmute(a), transmute(b))) + transmute(lasx_xvaddwev_h_bu_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -11122,7 +13150,10 @@ unsafe fn test_lasx_xvmulwev_d_wu_w() { assert_eq!( r, - transmute(lasx_xvmulwev_d_wu_w(transmute(a), transmute(b))) + transmute(lasx_xvmulwev_d_wu_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -11145,7 +13176,10 @@ unsafe fn test_lasx_xvmulwev_w_hu_h() { assert_eq!( r, - transmute(lasx_xvmulwev_w_hu_h(transmute(a), transmute(b))) + 
transmute(lasx_xvmulwev_w_hu_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -11168,7 +13202,10 @@ unsafe fn test_lasx_xvmulwev_h_bu_b() { assert_eq!( r, - transmute(lasx_xvmulwev_h_bu_b(transmute(a), transmute(b))) + transmute(lasx_xvmulwev_h_bu_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -11192,7 +13229,10 @@ unsafe fn test_lasx_xvaddwod_d_wu_w() { assert_eq!( r, - transmute(lasx_xvaddwod_d_wu_w(transmute(a), transmute(b))) + transmute(lasx_xvaddwod_d_wu_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -11215,7 +13255,10 @@ unsafe fn test_lasx_xvaddwod_w_hu_h() { assert_eq!( r, - transmute(lasx_xvaddwod_w_hu_h(transmute(a), transmute(b))) + transmute(lasx_xvaddwod_w_hu_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -11238,7 +13281,10 @@ unsafe fn test_lasx_xvaddwod_h_bu_b() { assert_eq!( r, - transmute(lasx_xvaddwod_h_bu_b(transmute(a), transmute(b))) + transmute(lasx_xvaddwod_h_bu_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -11267,7 +13313,10 @@ unsafe fn test_lasx_xvmulwod_d_wu_w() { assert_eq!( r, - transmute(lasx_xvmulwod_d_wu_w(transmute(a), transmute(b))) + transmute(lasx_xvmulwod_d_wu_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -11290,7 +13339,10 @@ unsafe fn test_lasx_xvmulwod_w_hu_h() { assert_eq!( r, - transmute(lasx_xvmulwod_w_hu_h(transmute(a), transmute(b))) + transmute(lasx_xvmulwod_w_hu_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -11313,7 +13365,10 @@ unsafe fn test_lasx_xvmulwod_h_bu_b() { assert_eq!( r, - transmute(lasx_xvmulwod_h_bu_b(transmute(a), transmute(b))) + transmute(lasx_xvmulwod_h_bu_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -11333,7 +13388,13 @@ unsafe fn test_lasx_xvhaddw_q_d() { ); let r = i64x4::new(7070440900316630840, -1, 4582440905924999074, 0); - assert_eq!(r, transmute(lasx_xvhaddw_q_d(transmute(a), transmute(b)))); + assert_eq!( + r, + 
transmute(lasx_xvhaddw_q_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -11352,7 +13413,13 @@ unsafe fn test_lasx_xvhaddw_qu_du() { ); let r = i64x4::new(-6342973196760799579, 0, -6232960347008472572, 1); - assert_eq!(r, transmute(lasx_xvhaddw_qu_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvhaddw_qu_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -11371,7 +13438,13 @@ unsafe fn test_lasx_xvhsubw_q_d() { ); let r = i64x4::new(5317548498597883842, 0, 6155348192460751216, -1); - assert_eq!(r, transmute(lasx_xvhsubw_q_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvhsubw_q_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -11390,7 +13463,13 @@ unsafe fn test_lasx_xvhsubw_qu_du() { ); let r = i64x4::new(11053881530518619, 0, -1215853579082277290, -1); - assert_eq!(r, transmute(lasx_xvhsubw_qu_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvhsubw_qu_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -11422,7 +13501,11 @@ unsafe fn test_lasx_xvmaddwev_q_d() { assert_eq!( r, - transmute(lasx_xvmaddwev_q_d(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvmaddwev_q_d( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -11463,7 +13546,11 @@ unsafe fn test_lasx_xvmaddwev_d_w() { assert_eq!( r, - transmute(lasx_xvmaddwev_d_w(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvmaddwev_d_w( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -11496,7 +13583,11 @@ unsafe fn test_lasx_xvmaddwev_w_h() { assert_eq!( r, - transmute(lasx_xvmaddwev_w_h(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvmaddwev_w_h( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) 
+ )) ); } @@ -11523,7 +13614,11 @@ unsafe fn test_lasx_xvmaddwev_h_b() { assert_eq!( r, - transmute(lasx_xvmaddwev_h_b(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvmaddwev_h_b( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -11557,9 +13652,9 @@ unsafe fn test_lasx_xvmaddwev_q_du() { assert_eq!( r, transmute(lasx_xvmaddwev_q_du( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -11590,9 +13685,9 @@ unsafe fn test_lasx_xvmaddwev_d_wu() { assert_eq!( r, transmute(lasx_xvmaddwev_d_wu( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -11621,9 +13716,9 @@ unsafe fn test_lasx_xvmaddwev_w_hu() { assert_eq!( r, transmute(lasx_xvmaddwev_w_hu( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -11652,9 +13747,9 @@ unsafe fn test_lasx_xvmaddwev_h_bu() { assert_eq!( r, transmute(lasx_xvmaddwev_h_bu( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -11688,7 +13783,11 @@ unsafe fn test_lasx_xvmaddwod_q_d() { assert_eq!( r, - transmute(lasx_xvmaddwod_q_d(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvmaddwod_q_d( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -11729,7 +13828,11 @@ unsafe fn test_lasx_xvmaddwod_d_w() { assert_eq!( r, - transmute(lasx_xvmaddwod_d_w(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvmaddwod_d_w( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -11762,7 +13865,11 @@ unsafe fn test_lasx_xvmaddwod_w_h() { assert_eq!( r, - transmute(lasx_xvmaddwod_w_h(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvmaddwod_w_h( + 
black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -11789,7 +13896,11 @@ unsafe fn test_lasx_xvmaddwod_h_b() { assert_eq!( r, - transmute(lasx_xvmaddwod_h_b(transmute(a), transmute(b), transmute(c))) + transmute(lasx_xvmaddwod_h_b( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -11823,9 +13934,9 @@ unsafe fn test_lasx_xvmaddwod_q_du() { assert_eq!( r, transmute(lasx_xvmaddwod_q_du( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -11856,9 +13967,9 @@ unsafe fn test_lasx_xvmaddwod_d_wu() { assert_eq!( r, transmute(lasx_xvmaddwod_d_wu( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -11887,9 +13998,9 @@ unsafe fn test_lasx_xvmaddwod_w_hu() { assert_eq!( r, transmute(lasx_xvmaddwod_w_hu( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -11918,9 +14029,9 @@ unsafe fn test_lasx_xvmaddwod_h_bu() { assert_eq!( r, transmute(lasx_xvmaddwod_h_bu( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -11955,9 +14066,9 @@ unsafe fn test_lasx_xvmaddwev_q_du_d() { assert_eq!( r, transmute(lasx_xvmaddwev_q_du_d( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -11994,9 +14105,9 @@ unsafe fn test_lasx_xvmaddwev_d_wu_w() { assert_eq!( r, transmute(lasx_xvmaddwev_d_wu_w( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -12031,9 +14142,9 @@ unsafe fn test_lasx_xvmaddwev_w_hu_h() { assert_eq!( r, transmute(lasx_xvmaddwev_w_hu_h( - transmute(a), - transmute(b), - transmute(c) + 
black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -12062,9 +14173,9 @@ unsafe fn test_lasx_xvmaddwev_h_bu_b() { assert_eq!( r, transmute(lasx_xvmaddwev_h_bu_b( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -12099,9 +14210,9 @@ unsafe fn test_lasx_xvmaddwod_q_du_d() { assert_eq!( r, transmute(lasx_xvmaddwod_q_du_d( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -12138,9 +14249,9 @@ unsafe fn test_lasx_xvmaddwod_d_wu_w() { assert_eq!( r, transmute(lasx_xvmaddwod_d_wu_w( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -12169,9 +14280,9 @@ unsafe fn test_lasx_xvmaddwod_w_hu_h() { assert_eq!( r, transmute(lasx_xvmaddwod_w_hu_h( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -12200,9 +14311,9 @@ unsafe fn test_lasx_xvmaddwod_h_bu_b() { assert_eq!( r, transmute(lasx_xvmaddwod_h_bu_b( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -12224,7 +14335,13 @@ unsafe fn test_lasx_xvrotr_b() { 5842271601646106402, ); - assert_eq!(r, transmute(lasx_xvrotr_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvrotr_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -12244,7 +14361,13 @@ unsafe fn test_lasx_xvrotr_h() { 8109266518466894464, ); - assert_eq!(r, transmute(lasx_xvrotr_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvrotr_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -12264,7 +14387,13 @@ unsafe fn test_lasx_xvrotr_w() { 8567937817891640092, ); - 
assert_eq!(r, transmute(lasx_xvrotr_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvrotr_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -12288,7 +14417,13 @@ unsafe fn test_lasx_xvrotr_d() { 4254025119287920211, ); - assert_eq!(r, transmute(lasx_xvrotr_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvrotr_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -12312,7 +14447,13 @@ unsafe fn test_lasx_xvadd_q() { 1706530784161666452, ); - assert_eq!(r, transmute(lasx_xvadd_q(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvadd_q( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -12336,7 +14477,13 @@ unsafe fn test_lasx_xvsub_q() { 1242748497994781383, ); - assert_eq!(r, transmute(lasx_xvsub_q(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvsub_q( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -12357,7 +14504,10 @@ unsafe fn test_lasx_xvaddwev_q_du_d() { assert_eq!( r, - transmute(lasx_xvaddwev_q_du_d(transmute(a), transmute(b))) + transmute(lasx_xvaddwev_q_du_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -12379,7 +14529,10 @@ unsafe fn test_lasx_xvaddwod_q_du_d() { assert_eq!( r, - transmute(lasx_xvaddwod_q_du_d(transmute(a), transmute(b))) + transmute(lasx_xvaddwod_q_du_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -12406,7 +14559,10 @@ unsafe fn test_lasx_xvmulwev_q_du_d() { assert_eq!( r, - transmute(lasx_xvmulwev_q_du_d(transmute(a), transmute(b))) + transmute(lasx_xvmulwev_q_du_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -12433,7 +14589,10 @@ unsafe fn test_lasx_xvmulwod_q_du_d() { assert_eq!( r, - transmute(lasx_xvmulwod_q_du_d(transmute(a), transmute(b))) + transmute(lasx_xvmulwod_q_du_d( + 
black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -12445,7 +14604,7 @@ unsafe fn test_lasx_xvmskgez_b() { ); let r = i64x4::new(13289, 0, 4927, 0); - assert_eq!(r, transmute(lasx_xvmskgez_b(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmskgez_b(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -12456,7 +14615,7 @@ unsafe fn test_lasx_xvmsknz_b() { ); let r = i64x4::new(65535, 0, 65535, 0); - assert_eq!(r, transmute(lasx_xvmsknz_b(transmute(a)))); + assert_eq!(r, transmute(lasx_xvmsknz_b(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -12472,7 +14631,7 @@ unsafe fn test_lasx_xvexth_h_b() { -1689051729887256, ); - assert_eq!(r, transmute(lasx_xvexth_h_b(transmute(a)))); + assert_eq!(r, transmute(lasx_xvexth_h_b(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -12488,7 +14647,7 @@ unsafe fn test_lasx_xvexth_w_h() { -117171002791439, ); - assert_eq!(r, transmute(lasx_xvexth_w_h(transmute(a)))); + assert_eq!(r, transmute(lasx_xvexth_w_h(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -12505,7 +14664,7 @@ unsafe fn test_lasx_xvexth_d_w() { ); let r = i64x4::new(78514216, -1063299454, -1487536177, 1875317589); - assert_eq!(r, transmute(lasx_xvexth_d_w(transmute(a)))); + assert_eq!(r, transmute(lasx_xvexth_d_w(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -12518,7 +14677,7 @@ unsafe fn test_lasx_xvexth_q_d() { ); let r = i64x4::new(5196480214883180720, 0, 7776492634988202392, 0); - assert_eq!(r, transmute(lasx_xvexth_q_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xvexth_q_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -12534,7 +14693,7 @@ unsafe fn test_lasx_xvexth_hu_bu() { 11259067788754993, ); - assert_eq!(r, transmute(lasx_xvexth_hu_bu(transmute(a)))); + assert_eq!(r, transmute(lasx_xvexth_hu_bu(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -12550,7 +14709,7 @@ unsafe fn test_lasx_xvexth_wu_hu() { 211376815493177, ); - assert_eq!(r, 
transmute(lasx_xvexth_wu_hu(transmute(a)))); + assert_eq!(r, transmute(lasx_xvexth_wu_hu(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -12561,7 +14720,7 @@ unsafe fn test_lasx_xvexth_du_wu() { ); let r = i64x4::new(3486710391, 717721410, 1954296323, 1406265475); - assert_eq!(r, transmute(lasx_xvexth_du_wu(transmute(a)))); + assert_eq!(r, transmute(lasx_xvexth_du_wu(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -12574,7 +14733,7 @@ unsafe fn test_lasx_xvexth_qu_du() { ); let r = i64x4::new(6305760528044738869, 0, 3857202168052068182, 0); - assert_eq!(r, transmute(lasx_xvexth_qu_du(transmute(a)))); + assert_eq!(r, transmute(lasx_xvexth_qu_du(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -12590,7 +14749,7 @@ unsafe fn test_lasx_xvrotri_b() { -3500418816657076903, ); - assert_eq!(r, transmute(lasx_xvrotri_b::<4>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvrotri_b::<4>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -12606,7 +14765,7 @@ unsafe fn test_lasx_xvrotri_h() { 4779464405959485451, ); - assert_eq!(r, transmute(lasx_xvrotri_h::<15>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvrotri_h::<15>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -12628,7 +14787,7 @@ unsafe fn test_lasx_xvrotri_w() { -1679179889808014898, ); - assert_eq!(r, transmute(lasx_xvrotri_w::<11>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvrotri_w::<11>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -12646,7 +14805,7 @@ unsafe fn test_lasx_xvrotri_d() { -7958311692822812825, ); - assert_eq!(r, transmute(lasx_xvrotri_d::<16>(transmute(a)))); + assert_eq!(r, transmute(lasx_xvrotri_d::<16>(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -12659,7 +14818,7 @@ unsafe fn test_lasx_xvextl_q_d() { ); let r = i64x4::new(-4167783494125842132, -1, 7476993593286219399, 0); - assert_eq!(r, transmute(lasx_xvextl_q_d(transmute(a)))); + assert_eq!(r, 
transmute(lasx_xvextl_q_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -12681,7 +14840,10 @@ unsafe fn test_lasx_xvsrlni_b_h() { assert_eq!( r, - transmute(lasx_xvsrlni_b_h::<4>(transmute(a), transmute(b))) + transmute(lasx_xvsrlni_b_h::<4>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -12704,7 +14866,10 @@ unsafe fn test_lasx_xvsrlni_h_w() { assert_eq!( r, - transmute(lasx_xvsrlni_h_w::<16>(transmute(a), transmute(b))) + transmute(lasx_xvsrlni_h_w::<16>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -12739,7 +14904,10 @@ unsafe fn test_lasx_xvsrlni_w_d() { assert_eq!( r, - transmute(lasx_xvsrlni_w_d::<26>(transmute(a), transmute(b))) + transmute(lasx_xvsrlni_w_d::<26>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -12761,7 +14929,10 @@ unsafe fn test_lasx_xvsrlni_d_q() { assert_eq!( r, - transmute(lasx_xvsrlni_d_q::<102>(transmute(a), transmute(b))) + transmute(lasx_xvsrlni_d_q::<102>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -12784,7 +14955,10 @@ unsafe fn test_lasx_xvsrlrni_b_h() { assert_eq!( r, - transmute(lasx_xvsrlrni_b_h::<8>(transmute(a), transmute(b))) + transmute(lasx_xvsrlrni_b_h::<8>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -12807,7 +14981,10 @@ unsafe fn test_lasx_xvsrlrni_h_w() { assert_eq!( r, - transmute(lasx_xvsrlrni_h_w::<5>(transmute(a), transmute(b))) + transmute(lasx_xvsrlrni_h_w::<5>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -12842,7 +15019,10 @@ unsafe fn test_lasx_xvsrlrni_w_d() { assert_eq!( r, - transmute(lasx_xvsrlrni_w_d::<43>(transmute(a), transmute(b))) + transmute(lasx_xvsrlrni_w_d::<43>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -12864,7 +15044,10 @@ unsafe fn test_lasx_xvsrlrni_d_q() { assert_eq!( r, - transmute(lasx_xvsrlrni_d_q::<126>(transmute(a), transmute(b))) + transmute(lasx_xvsrlrni_d_q::<126>( + black_box(transmute(a)), + black_box(transmute(b)) + 
)) ); } @@ -12887,7 +15070,10 @@ unsafe fn test_lasx_xvssrlni_b_h() { assert_eq!( r, - transmute(lasx_xvssrlni_b_h::<4>(transmute(a), transmute(b))) + transmute(lasx_xvssrlni_b_h::<4>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -12905,7 +15091,10 @@ unsafe fn test_lasx_xvssrlni_h_w() { assert_eq!( r, - transmute(lasx_xvssrlni_h_w::<31>(transmute(a), transmute(b))) + transmute(lasx_xvssrlni_h_w::<31>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -12940,7 +15129,10 @@ unsafe fn test_lasx_xvssrlni_w_d() { assert_eq!( r, - transmute(lasx_xvssrlni_w_d::<14>(transmute(a), transmute(b))) + transmute(lasx_xvssrlni_w_d::<14>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -12967,7 +15159,10 @@ unsafe fn test_lasx_xvssrlni_d_q() { assert_eq!( r, - transmute(lasx_xvssrlni_d_q::<35>(transmute(a), transmute(b))) + transmute(lasx_xvssrlni_d_q::<35>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -12990,7 +15185,10 @@ unsafe fn test_lasx_xvssrlni_bu_h() { assert_eq!( r, - transmute(lasx_xvssrlni_bu_h::<11>(transmute(a), transmute(b))) + transmute(lasx_xvssrlni_bu_h::<11>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13008,7 +15206,10 @@ unsafe fn test_lasx_xvssrlni_hu_w() { assert_eq!( r, - transmute(lasx_xvssrlni_hu_w::<31>(transmute(a), transmute(b))) + transmute(lasx_xvssrlni_hu_w::<31>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13032,7 +15233,10 @@ unsafe fn test_lasx_xvssrlni_wu_d() { assert_eq!( r, - transmute(lasx_xvssrlni_wu_d::<24>(transmute(a), transmute(b))) + transmute(lasx_xvssrlni_wu_d::<24>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13054,7 +15258,10 @@ unsafe fn test_lasx_xvssrlni_du_q() { assert_eq!( r, - transmute(lasx_xvssrlni_du_q::<109>(transmute(a), transmute(b))) + transmute(lasx_xvssrlni_du_q::<109>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13077,7 +15284,10 @@ unsafe fn 
test_lasx_xvssrlrni_b_h() { assert_eq!( r, - transmute(lasx_xvssrlrni_b_h::<7>(transmute(a), transmute(b))) + transmute(lasx_xvssrlrni_b_h::<7>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13100,7 +15310,10 @@ unsafe fn test_lasx_xvssrlrni_h_w() { assert_eq!( r, - transmute(lasx_xvssrlrni_h_w::<11>(transmute(a), transmute(b))) + transmute(lasx_xvssrlrni_h_w::<11>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13135,7 +15348,10 @@ unsafe fn test_lasx_xvssrlrni_w_d() { assert_eq!( r, - transmute(lasx_xvssrlrni_w_d::<27>(transmute(a), transmute(b))) + transmute(lasx_xvssrlrni_w_d::<27>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13157,7 +15373,10 @@ unsafe fn test_lasx_xvssrlrni_d_q() { assert_eq!( r, - transmute(lasx_xvssrlrni_d_q::<94>(transmute(a), transmute(b))) + transmute(lasx_xvssrlrni_d_q::<94>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13175,7 +15394,10 @@ unsafe fn test_lasx_xvssrlrni_bu_h() { assert_eq!( r, - transmute(lasx_xvssrlrni_bu_h::<4>(transmute(a), transmute(b))) + transmute(lasx_xvssrlrni_bu_h::<4>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13198,7 +15420,10 @@ unsafe fn test_lasx_xvssrlrni_hu_w() { assert_eq!( r, - transmute(lasx_xvssrlrni_hu_w::<16>(transmute(a), transmute(b))) + transmute(lasx_xvssrlrni_hu_w::<16>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13227,7 +15452,10 @@ unsafe fn test_lasx_xvssrlrni_wu_d() { assert_eq!( r, - transmute(lasx_xvssrlrni_wu_d::<50>(transmute(a), transmute(b))) + transmute(lasx_xvssrlrni_wu_d::<50>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13249,7 +15477,10 @@ unsafe fn test_lasx_xvssrlrni_du_q() { assert_eq!( r, - transmute(lasx_xvssrlrni_du_q::<53>(transmute(a), transmute(b))) + transmute(lasx_xvssrlrni_du_q::<53>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13272,7 +15503,10 @@ unsafe fn test_lasx_xvsrani_b_h() { 
assert_eq!( r, - transmute(lasx_xvsrani_b_h::<8>(transmute(a), transmute(b))) + transmute(lasx_xvsrani_b_h::<8>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13295,7 +15529,10 @@ unsafe fn test_lasx_xvsrani_h_w() { assert_eq!( r, - transmute(lasx_xvsrani_h_w::<0>(transmute(a), transmute(b))) + transmute(lasx_xvsrani_h_w::<0>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13330,7 +15567,10 @@ unsafe fn test_lasx_xvsrani_w_d() { assert_eq!( r, - transmute(lasx_xvsrani_w_d::<28>(transmute(a), transmute(b))) + transmute(lasx_xvsrani_w_d::<28>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13357,7 +15597,10 @@ unsafe fn test_lasx_xvsrani_d_q() { assert_eq!( r, - transmute(lasx_xvsrani_d_q::<66>(transmute(a), transmute(b))) + transmute(lasx_xvsrani_d_q::<66>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13380,7 +15623,10 @@ unsafe fn test_lasx_xvsrarni_b_h() { assert_eq!( r, - transmute(lasx_xvsrarni_b_h::<4>(transmute(a), transmute(b))) + transmute(lasx_xvsrarni_b_h::<4>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13403,7 +15649,10 @@ unsafe fn test_lasx_xvsrarni_h_w() { assert_eq!( r, - transmute(lasx_xvsrarni_h_w::<9>(transmute(a), transmute(b))) + transmute(lasx_xvsrarni_h_w::<9>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13427,7 +15676,10 @@ unsafe fn test_lasx_xvsrarni_w_d() { assert_eq!( r, - transmute(lasx_xvsrarni_w_d::<63>(transmute(a), transmute(b))) + transmute(lasx_xvsrarni_w_d::<63>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13449,7 +15701,10 @@ unsafe fn test_lasx_xvsrarni_d_q() { assert_eq!( r, - transmute(lasx_xvsrarni_d_q::<102>(transmute(a), transmute(b))) + transmute(lasx_xvsrarni_d_q::<102>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13472,7 +15727,10 @@ unsafe fn test_lasx_xvssrani_b_h() { assert_eq!( r, - transmute(lasx_xvssrani_b_h::<5>(transmute(a), transmute(b))) + 
transmute(lasx_xvssrani_b_h::<5>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13495,7 +15753,10 @@ unsafe fn test_lasx_xvssrani_h_w() { assert_eq!( r, - transmute(lasx_xvssrani_h_w::<0>(transmute(a), transmute(b))) + transmute(lasx_xvssrani_h_w::<0>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13530,7 +15791,10 @@ unsafe fn test_lasx_xvssrani_w_d() { assert_eq!( r, - transmute(lasx_xvssrani_w_d::<45>(transmute(a), transmute(b))) + transmute(lasx_xvssrani_w_d::<45>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13557,7 +15821,10 @@ unsafe fn test_lasx_xvssrani_d_q() { assert_eq!( r, - transmute(lasx_xvssrani_d_q::<73>(transmute(a), transmute(b))) + transmute(lasx_xvssrani_d_q::<73>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13575,7 +15842,10 @@ unsafe fn test_lasx_xvssrani_bu_h() { assert_eq!( r, - transmute(lasx_xvssrani_bu_h::<12>(transmute(a), transmute(b))) + transmute(lasx_xvssrani_bu_h::<12>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13593,7 +15863,10 @@ unsafe fn test_lasx_xvssrani_hu_w() { assert_eq!( r, - transmute(lasx_xvssrani_hu_w::<9>(transmute(a), transmute(b))) + transmute(lasx_xvssrani_hu_w::<9>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13617,7 +15890,10 @@ unsafe fn test_lasx_xvssrani_wu_d() { assert_eq!( r, - transmute(lasx_xvssrani_wu_d::<42>(transmute(a), transmute(b))) + transmute(lasx_xvssrani_wu_d::<42>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13639,7 +15915,10 @@ unsafe fn test_lasx_xvssrani_du_q() { assert_eq!( r, - transmute(lasx_xvssrani_du_q::<115>(transmute(a), transmute(b))) + transmute(lasx_xvssrani_du_q::<115>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13662,7 +15941,10 @@ unsafe fn test_lasx_xvssrarni_b_h() { assert_eq!( r, - transmute(lasx_xvssrarni_b_h::<6>(transmute(a), transmute(b))) + transmute(lasx_xvssrarni_b_h::<6>( + 
black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13685,7 +15967,10 @@ unsafe fn test_lasx_xvssrarni_h_w() { assert_eq!( r, - transmute(lasx_xvssrarni_h_w::<25>(transmute(a), transmute(b))) + transmute(lasx_xvssrarni_h_w::<25>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13715,7 +16000,10 @@ unsafe fn test_lasx_xvssrarni_w_d() { assert_eq!( r, - transmute(lasx_xvssrarni_w_d::<61>(transmute(a), transmute(b))) + transmute(lasx_xvssrarni_w_d::<61>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13737,7 +16025,10 @@ unsafe fn test_lasx_xvssrarni_d_q() { assert_eq!( r, - transmute(lasx_xvssrarni_d_q::<123>(transmute(a), transmute(b))) + transmute(lasx_xvssrarni_d_q::<123>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13760,7 +16051,10 @@ unsafe fn test_lasx_xvssrarni_bu_h() { assert_eq!( r, - transmute(lasx_xvssrarni_bu_h::<10>(transmute(a), transmute(b))) + transmute(lasx_xvssrarni_bu_h::<10>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13778,7 +16072,10 @@ unsafe fn test_lasx_xvssrarni_hu_w() { assert_eq!( r, - transmute(lasx_xvssrarni_hu_w::<30>(transmute(a), transmute(b))) + transmute(lasx_xvssrarni_hu_w::<30>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13802,7 +16099,10 @@ unsafe fn test_lasx_xvssrarni_wu_d() { assert_eq!( r, - transmute(lasx_xvssrarni_wu_d::<61>(transmute(a), transmute(b))) + transmute(lasx_xvssrarni_wu_d::<61>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13824,7 +16124,10 @@ unsafe fn test_lasx_xvssrarni_du_q() { assert_eq!( r, - transmute(lasx_xvssrarni_du_q::<15>(transmute(a), transmute(b))) + transmute(lasx_xvssrarni_du_q::<15>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -13836,7 +16139,7 @@ unsafe fn test_lasx_xbnz_b() { ); let r: i32 = 1; - assert_eq!(r, transmute(lasx_xbnz_b(transmute(a)))); + assert_eq!(r, transmute(lasx_xbnz_b(black_box(transmute(a))))); } 
#[simd_test(enable = "lasx")] @@ -13849,7 +16152,7 @@ unsafe fn test_lasx_xbnz_d() { ); let r: i32 = 1; - assert_eq!(r, transmute(lasx_xbnz_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xbnz_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -13860,7 +16163,7 @@ unsafe fn test_lasx_xbnz_h() { ); let r: i32 = 1; - assert_eq!(r, transmute(lasx_xbnz_h(transmute(a)))); + assert_eq!(r, transmute(lasx_xbnz_h(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -13871,7 +16174,7 @@ unsafe fn test_lasx_xbnz_v() { ); let r: i32 = 1; - assert_eq!(r, transmute(lasx_xbnz_v(transmute(a)))); + assert_eq!(r, transmute(lasx_xbnz_v(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -13882,7 +16185,7 @@ unsafe fn test_lasx_xbnz_w() { ); let r: i32 = 1; - assert_eq!(r, transmute(lasx_xbnz_w(transmute(a)))); + assert_eq!(r, transmute(lasx_xbnz_w(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -13893,7 +16196,7 @@ unsafe fn test_lasx_xbz_b() { ); let r: i32 = 0; - assert_eq!(r, transmute(lasx_xbz_b(transmute(a)))); + assert_eq!(r, transmute(lasx_xbz_b(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -13906,7 +16209,7 @@ unsafe fn test_lasx_xbz_d() { ); let r: i32 = 0; - assert_eq!(r, transmute(lasx_xbz_d(transmute(a)))); + assert_eq!(r, transmute(lasx_xbz_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -13917,7 +16220,7 @@ unsafe fn test_lasx_xbz_h() { ); let r: i32 = 0; - assert_eq!(r, transmute(lasx_xbz_h(transmute(a)))); + assert_eq!(r, transmute(lasx_xbz_h(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -13928,7 +16231,7 @@ unsafe fn test_lasx_xbz_v() { ); let r: i32 = 0; - assert_eq!(r, transmute(lasx_xbz_v(transmute(a)))); + assert_eq!(r, transmute(lasx_xbz_v(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -13939,7 +16242,7 @@ unsafe fn test_lasx_xbz_w() { ); let r: i32 = 0; - assert_eq!(r, transmute(lasx_xbz_w(transmute(a)))); + assert_eq!(r, 
transmute(lasx_xbz_w(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -13958,7 +16261,13 @@ unsafe fn test_lasx_xvfcmp_caf_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_caf_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_caf_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -13973,7 +16282,13 @@ unsafe fn test_lasx_xvfcmp_caf_s() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_caf_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_caf_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -13992,7 +16307,13 @@ unsafe fn test_lasx_xvfcmp_ceq_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_ceq_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_ceq_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14007,7 +16328,13 @@ unsafe fn test_lasx_xvfcmp_ceq_s() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_ceq_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_ceq_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14026,7 +16353,13 @@ unsafe fn test_lasx_xvfcmp_cle_d() { ); let r = i64x4::new(-1, -1, -1, 0); - assert_eq!(r, transmute(lasx_xvfcmp_cle_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_cle_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14041,7 +16374,13 @@ unsafe fn test_lasx_xvfcmp_cle_s() { ); let r = i64x4::new(0, -1, -1, -4294967296); - assert_eq!(r, transmute(lasx_xvfcmp_cle_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_cle_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] 
@@ -14060,7 +16399,13 @@ unsafe fn test_lasx_xvfcmp_clt_d() { ); let r = i64x4::new(0, -1, 0, -1); - assert_eq!(r, transmute(lasx_xvfcmp_clt_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_clt_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14075,7 +16420,13 @@ unsafe fn test_lasx_xvfcmp_clt_s() { ); let r = i64x4::new(-1, 4294967295, -1, -1); - assert_eq!(r, transmute(lasx_xvfcmp_clt_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_clt_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14094,7 +16445,13 @@ unsafe fn test_lasx_xvfcmp_cne_d() { ); let r = i64x4::new(-1, -1, -1, -1); - assert_eq!(r, transmute(lasx_xvfcmp_cne_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_cne_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14109,7 +16466,13 @@ unsafe fn test_lasx_xvfcmp_cne_s() { ); let r = i64x4::new(-1, -1, -1, -1); - assert_eq!(r, transmute(lasx_xvfcmp_cne_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_cne_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14128,7 +16491,13 @@ unsafe fn test_lasx_xvfcmp_cor_d() { ); let r = i64x4::new(-1, -1, -1, -1); - assert_eq!(r, transmute(lasx_xvfcmp_cor_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_cor_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14143,7 +16512,13 @@ unsafe fn test_lasx_xvfcmp_cor_s() { ); let r = i64x4::new(-1, -1, -1, -1); - assert_eq!(r, transmute(lasx_xvfcmp_cor_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_cor_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14162,7 +16537,13 @@ unsafe fn test_lasx_xvfcmp_cueq_d() { ); 
let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_cueq_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_cueq_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14177,7 +16558,13 @@ unsafe fn test_lasx_xvfcmp_cueq_s() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_cueq_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_cueq_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14196,7 +16583,13 @@ unsafe fn test_lasx_xvfcmp_cule_d() { ); let r = i64x4::new(0, -1, -1, 0); - assert_eq!(r, transmute(lasx_xvfcmp_cule_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_cule_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14211,7 +16604,13 @@ unsafe fn test_lasx_xvfcmp_cule_s() { ); let r = i64x4::new(-4294967296, 4294967295, 4294967295, -1); - assert_eq!(r, transmute(lasx_xvfcmp_cule_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_cule_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14230,7 +16629,13 @@ unsafe fn test_lasx_xvfcmp_cult_d() { ); let r = i64x4::new(0, -1, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_cult_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_cult_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14245,7 +16650,13 @@ unsafe fn test_lasx_xvfcmp_cult_s() { ); let r = i64x4::new(-1, 0, -1, -1); - assert_eq!(r, transmute(lasx_xvfcmp_cult_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_cult_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14264,7 +16675,13 @@ unsafe fn test_lasx_xvfcmp_cun_d() { ); let r = i64x4::new(0, 0, 0, 0); - 
assert_eq!(r, transmute(lasx_xvfcmp_cun_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_cun_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14283,7 +16700,13 @@ unsafe fn test_lasx_xvfcmp_cune_d() { ); let r = i64x4::new(-1, -1, -1, -1); - assert_eq!(r, transmute(lasx_xvfcmp_cune_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_cune_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14298,7 +16721,13 @@ unsafe fn test_lasx_xvfcmp_cune_s() { ); let r = i64x4::new(-1, -1, -1, -1); - assert_eq!(r, transmute(lasx_xvfcmp_cune_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_cune_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14313,7 +16742,13 @@ unsafe fn test_lasx_xvfcmp_cun_s() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_cun_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_cun_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14332,7 +16767,13 @@ unsafe fn test_lasx_xvfcmp_saf_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_saf_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_saf_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14347,7 +16788,13 @@ unsafe fn test_lasx_xvfcmp_saf_s() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_saf_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_saf_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14366,7 +16813,13 @@ unsafe fn test_lasx_xvfcmp_seq_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_seq_d(transmute(a), transmute(b)))); + 
assert_eq!( + r, + transmute(lasx_xvfcmp_seq_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14381,7 +16834,13 @@ unsafe fn test_lasx_xvfcmp_seq_s() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_seq_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_seq_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14400,7 +16859,13 @@ unsafe fn test_lasx_xvfcmp_sle_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_sle_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_sle_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14415,7 +16880,13 @@ unsafe fn test_lasx_xvfcmp_sle_s() { ); let r = i64x4::new(0, 4294967295, -1, 0); - assert_eq!(r, transmute(lasx_xvfcmp_sle_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_sle_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14434,7 +16905,13 @@ unsafe fn test_lasx_xvfcmp_slt_d() { ); let r = i64x4::new(0, -1, -1, 0); - assert_eq!(r, transmute(lasx_xvfcmp_slt_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_slt_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14449,7 +16926,13 @@ unsafe fn test_lasx_xvfcmp_slt_s() { ); let r = i64x4::new(0, -4294967296, 4294967295, -1); - assert_eq!(r, transmute(lasx_xvfcmp_slt_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_slt_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14468,7 +16951,13 @@ unsafe fn test_lasx_xvfcmp_sne_d() { ); let r = i64x4::new(-1, -1, -1, -1); - assert_eq!(r, transmute(lasx_xvfcmp_sne_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_sne_d( + 
black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14483,7 +16972,13 @@ unsafe fn test_lasx_xvfcmp_sne_s() { ); let r = i64x4::new(-1, -1, -1, -1); - assert_eq!(r, transmute(lasx_xvfcmp_sne_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_sne_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14502,7 +16997,13 @@ unsafe fn test_lasx_xvfcmp_sor_d() { ); let r = i64x4::new(-1, -1, -1, -1); - assert_eq!(r, transmute(lasx_xvfcmp_sor_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_sor_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14517,7 +17018,13 @@ unsafe fn test_lasx_xvfcmp_sor_s() { ); let r = i64x4::new(-1, -1, -1, -1); - assert_eq!(r, transmute(lasx_xvfcmp_sor_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_sor_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14536,7 +17043,13 @@ unsafe fn test_lasx_xvfcmp_sueq_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_sueq_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_sueq_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14551,7 +17064,13 @@ unsafe fn test_lasx_xvfcmp_sueq_s() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_sueq_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_sueq_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14570,7 +17089,13 @@ unsafe fn test_lasx_xvfcmp_sule_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_sule_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_sule_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } 
#[simd_test(enable = "lasx")] @@ -14585,7 +17110,13 @@ unsafe fn test_lasx_xvfcmp_sule_s() { ); let r = i64x4::new(0, 4294967295, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_sule_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_sule_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14604,7 +17135,13 @@ unsafe fn test_lasx_xvfcmp_sult_d() { ); let r = i64x4::new(0, -1, 0, -1); - assert_eq!(r, transmute(lasx_xvfcmp_sult_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_sult_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14619,7 +17156,13 @@ unsafe fn test_lasx_xvfcmp_sult_s() { ); let r = i64x4::new(-1, 4294967295, -1, 0); - assert_eq!(r, transmute(lasx_xvfcmp_sult_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_sult_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14638,7 +17181,13 @@ unsafe fn test_lasx_xvfcmp_sun_d() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_sun_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_sun_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14657,7 +17206,13 @@ unsafe fn test_lasx_xvfcmp_sune_d() { ); let r = i64x4::new(-1, -1, -1, -1); - assert_eq!(r, transmute(lasx_xvfcmp_sune_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_sune_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14672,7 +17227,13 @@ unsafe fn test_lasx_xvfcmp_sune_s() { ); let r = i64x4::new(-1, -1, -1, -1); - assert_eq!(r, transmute(lasx_xvfcmp_sune_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_sune_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14687,7 +17248,13 
@@ unsafe fn test_lasx_xvfcmp_sun_s() { ); let r = i64x4::new(0, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvfcmp_sun_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_xvfcmp_sun_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14700,7 +17267,10 @@ unsafe fn test_lasx_xvpickve_d_f() { ); let r = i64x4::new(4605596490350167974, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvpickve_d_f::<1>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvpickve_d_f::<1>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -14711,7 +17281,10 @@ unsafe fn test_lasx_xvpickve_w_f() { ); let r = i64x4::new(1040565756, 0, 0, 0); - assert_eq!(r, transmute(lasx_xvpickve_w_f::<1>(transmute(a)))); + assert_eq!( + r, + transmute(lasx_xvpickve_w_f::<1>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lasx")] @@ -14764,7 +17337,7 @@ unsafe fn test_lasx_cast_128_s() { assert_eq!( r.as_array()[0..2], - transmute::<_, i64x4>(lasx_cast_128_s(transmute(a))).as_array()[0..2] + transmute::<_, i64x4>(lasx_cast_128_s(black_box(transmute(a)))).as_array()[0..2] ); } @@ -14780,7 +17353,7 @@ unsafe fn test_lasx_cast_128_d() { assert_eq!( r.as_array()[0..2], - transmute::<_, i64x4>(lasx_cast_128_d(transmute(a))).as_array()[0..2] + transmute::<_, i64x4>(lasx_cast_128_d(black_box(transmute(a)))).as_array()[0..2] ); } @@ -14796,7 +17369,7 @@ unsafe fn test_lasx_cast_128() { assert_eq!( r.as_array()[0..2], - transmute::<_, i64x4>(lasx_cast_128(transmute(a))).as_array()[0..2] + transmute::<_, i64x4>(lasx_cast_128(black_box(transmute(a)))).as_array()[0..2] ); } @@ -14811,7 +17384,13 @@ unsafe fn test_lasx_concat_128_s() { 4410275898954698048, ); - assert_eq!(r, transmute(lasx_concat_128_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_concat_128_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14825,7 +17404,13 @@ unsafe fn test_lasx_concat_128_d() { 
4600308396523102002, ); - assert_eq!(r, transmute(lasx_concat_128_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_concat_128_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14839,7 +17424,13 @@ unsafe fn test_lasx_concat_128() { 7751541408133090748, ); - assert_eq!(r, transmute(lasx_concat_128(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_concat_128( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -14850,7 +17441,7 @@ unsafe fn test_lasx_extract_128_lo_s() { ); let r = i64x2::new(4524431379435545192, 4532741359493293580); - assert_eq!(r, transmute(lasx_extract_128_lo_s(transmute(a)))); + assert_eq!(r, transmute(lasx_extract_128_lo_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -14861,7 +17452,7 @@ unsafe fn test_lasx_extract_128_hi_s() { ); let r = i64x2::new(4572785117706267614, 4549394373627784333); - assert_eq!(r, transmute(lasx_extract_128_hi_s(transmute(a)))); + assert_eq!(r, transmute(lasx_extract_128_hi_s(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -14874,7 +17465,7 @@ unsafe fn test_lasx_extract_128_lo_d() { ); let r = i64x2::new(4606487981487128637, 4592443779247846248); - assert_eq!(r, transmute(lasx_extract_128_lo_d(transmute(a)))); + assert_eq!(r, transmute(lasx_extract_128_lo_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -14887,7 +17478,7 @@ unsafe fn test_lasx_extract_128_hi_d() { ); let r = i64x2::new(4603881047625519227, 4604218419306666352); - assert_eq!(r, transmute(lasx_extract_128_hi_d(transmute(a)))); + assert_eq!(r, transmute(lasx_extract_128_hi_d(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -14900,7 +17491,7 @@ unsafe fn test_lasx_extract_128_lo() { ); let r = i64x2::new(1690990426210778543, -1056924033489771427); - assert_eq!(r, transmute(lasx_extract_128_lo(transmute(a)))); + assert_eq!(r, 
transmute(lasx_extract_128_lo(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -14913,7 +17504,7 @@ unsafe fn test_lasx_extract_128_hi() { ); let r = i64x2::new(-1903780563362068813, -7449796170151383489); - assert_eq!(r, transmute(lasx_extract_128_hi(transmute(a)))); + assert_eq!(r, transmute(lasx_extract_128_hi(black_box(transmute(a))))); } #[simd_test(enable = "lasx")] @@ -14932,7 +17523,10 @@ unsafe fn test_lasx_insert_128_lo_s() { assert_eq!( r, - transmute(lasx_insert_128_lo_s(transmute(a), transmute(b))) + transmute(lasx_insert_128_lo_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -14952,7 +17546,10 @@ unsafe fn test_lasx_insert_128_hi_s() { assert_eq!( r, - transmute(lasx_insert_128_hi_s(transmute(a), transmute(b))) + transmute(lasx_insert_128_hi_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -14974,7 +17571,10 @@ unsafe fn test_lasx_insert_128_lo_d() { assert_eq!( r, - transmute(lasx_insert_128_lo_d(transmute(a), transmute(b))) + transmute(lasx_insert_128_lo_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -14996,7 +17596,10 @@ unsafe fn test_lasx_insert_128_hi_d() { assert_eq!( r, - transmute(lasx_insert_128_hi_d(transmute(a), transmute(b))) + transmute(lasx_insert_128_hi_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -15016,7 +17619,13 @@ unsafe fn test_lasx_insert_128_lo() { -4396186135186039276, ); - assert_eq!(r, transmute(lasx_insert_128_lo(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_insert_128_lo( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lasx")] @@ -15035,5 +17644,11 @@ unsafe fn test_lasx_insert_128_hi() { -7502655081590988207, ); - assert_eq!(r, transmute(lasx_insert_128_hi(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lasx_insert_128_hi( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } diff --git 
a/library/stdarch/crates/core_arch/src/loongarch64/lsx/tests.rs b/library/stdarch/crates/core_arch/src/loongarch64/lsx/tests.rs index 5670bd4378a84..748e2b597ada7 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lsx/tests.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lsx/tests.rs @@ -5,6 +5,7 @@ use crate::{ core_arch::{loongarch64::*, simd::*}, mem::transmute, }; +use std::hint::black_box; use stdarch_test::simd_test; #[simd_test(enable = "lsx")] @@ -17,7 +18,10 @@ unsafe fn test_lsx_vsll_b() { ); let r = i64x2::new(70990221811840, -3257029622096690968); - assert_eq!(r, transmute(lsx_vsll_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsll_b(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -26,7 +30,10 @@ unsafe fn test_lsx_vsll_h() { let b = i16x8::new(-10317, -20778, -9962, -8975, 25298, 12929, -13803, -18669); let r = i64x2::new(-5063658964307128392, -3539825456407336052); - assert_eq!(r, transmute(lsx_vsll_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsll_h(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -35,7 +42,10 @@ unsafe fn test_lsx_vsll_w() { let b = i32x4::new(82237029, -819106294, -96895338, -456101700); let r = i64x2::new(-7163824029380778240, 2305843009528266752); - assert_eq!(r, transmute(lsx_vsll_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsll_w(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -44,7 +54,10 @@ unsafe fn test_lsx_vsll_d() { let b = i64x2::new(8592669249977019309, -1379694176202045825); let r = i64x2::new(1790743801833193472, 0); - assert_eq!(r, transmute(lsx_vsll_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsll_d(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -54,7 +67,7 @@ unsafe fn test_lsx_vslli_b() { ); let r = i64x2::new(-2780807324588213414, 
-3708578564830607166); - assert_eq!(r, transmute(lsx_vslli_b::<0>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslli_b::<0>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -62,7 +75,7 @@ unsafe fn test_lsx_vslli_h() { let a = i16x8::new(18469, -14840, 23655, -3474, 7467, 2798, -15418, 26847); let r = i64x2::new(-7241759886206301888, 4017476402818337472); - assert_eq!(r, transmute(lsx_vslli_h::<6>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslli_h::<6>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -70,7 +83,7 @@ unsafe fn test_lsx_vslli_w() { let a = i32x4::new(20701902, -1777432355, 6349179, 1747667894); let r = i64x2::new(4189319625752393728, -5967594959501136896); - assert_eq!(r, transmute(lsx_vslli_w::<10>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslli_w::<10>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -78,7 +91,7 @@ unsafe fn test_lsx_vslli_d() { let a = i64x2::new(-5896889635782282086, -8807609320972692839); let r = i64x2::new(-4233027607937510592, -5142337165482896608); - assert_eq!(r, transmute(lsx_vslli_d::<5>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslli_d::<5>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -91,7 +104,10 @@ unsafe fn test_lsx_vsra_b() { ); let r = i64x2::new(-1080315035391229440, 720022881735668484); - assert_eq!(r, transmute(lsx_vsra_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsra_b(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -100,7 +116,10 @@ unsafe fn test_lsx_vsra_h() { let b = i16x8::new(14017, 3796, 23987, -27244, -13363, 21333, -10262, 23633); let r = i64x2::new(164116464290576704, -1935703552267190275); - assert_eq!(r, transmute(lsx_vsra_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsra_h(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -109,7 +128,10 @@ unsafe fn test_lsx_vsra_w() { let b = i32x4::new(-670772992, 
2044335288, -1224858031, 520588790); let r = i64x2::new(-210763200496, 1619202657181); - assert_eq!(r, transmute(lsx_vsra_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsra_w(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -118,7 +140,10 @@ unsafe fn test_lsx_vsra_d() { let b = i64x2::new(4251079558060308329, 4657697142994416829); let r = i64x2::new(-623956, 3); - assert_eq!(r, transmute(lsx_vsra_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsra_d(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -128,7 +153,7 @@ unsafe fn test_lsx_vsrai_b() { ); let r = i64x2::new(-2018743940785760257, -2093355901512246518); - assert_eq!(r, transmute(lsx_vsrai_b::<2>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsrai_b::<2>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -136,7 +161,7 @@ unsafe fn test_lsx_vsrai_h() { let a = i16x8::new(-22502, -7299, 19084, -21578, -28082, 20851, 23456, 15524); let r = i64x2::new(-1688828385492998, 844446405361657); - assert_eq!(r, transmute(lsx_vsrai_h::<12>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsrai_h::<12>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -144,7 +169,7 @@ unsafe fn test_lsx_vsrai_w() { let a = i32x4::new(743537539, 1831641900, -1639033567, -984629971); let r = i64x2::new(30008936499988, -16131897170029); - assert_eq!(r, transmute(lsx_vsrai_w::<18>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsrai_w::<18>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -152,7 +177,7 @@ unsafe fn test_lsx_vsrai_d() { let a = i64x2::new(-8375997486414293750, 1714581574012370587); let r = i64x2::new(-476121, 97462); - assert_eq!(r, transmute(lsx_vsrai_d::<44>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsrai_d::<44>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -165,7 +190,13 @@ unsafe fn test_lsx_vsrar_b() { ); let r = i64x2::new(139917463134404866, 
143840305941130491); - assert_eq!(r, transmute(lsx_vsrar_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrar_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -174,7 +205,13 @@ unsafe fn test_lsx_vsrar_h() { let b = i16x8::new(-26450, 2176, 31587, 2222, 13726, 30172, 1067, -14273); let r = i64x2::new(-287115463426050, 42950131714); - assert_eq!(r, transmute(lsx_vsrar_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrar_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -183,7 +220,13 @@ unsafe fn test_lsx_vsrar_w() { let b = i32x4::new(-1532076758, 940127488, 1781366421, 1497262222); let r = i64x2::new(7179867468326627830, 560544771735247); - assert_eq!(r, transmute(lsx_vsrar_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrar_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -192,7 +235,13 @@ unsafe fn test_lsx_vsrar_d() { let b = i64x2::new(3571440266112779495, -725943254065719378); let r = i64x2::new(-890187, -17811); - assert_eq!(r, transmute(lsx_vsrar_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrar_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -202,7 +251,7 @@ unsafe fn test_lsx_vsrari_b() { ); let r = i64x2::new(867219992078845182, -503291487652282122); - assert_eq!(r, transmute(lsx_vsrari_b::<3>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsrari_b::<3>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -210,7 +259,7 @@ unsafe fn test_lsx_vsrari_h() { let a = i16x8::new(29939, -1699, 12357, 30805, -30883, 31936, 15701, -11818); let r = i64x2::new(4222154715365391, -1688815499411471); - assert_eq!(r, transmute(lsx_vsrari_h::<11>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsrari_h::<11>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -218,7 
+267,7 @@ unsafe fn test_lsx_vsrari_w() { let a = i32x4::new(588196178, -1058764534, 1325397591, 1169671026); let r = i64x2::new(-4294967295, 4294967297); - assert_eq!(r, transmute(lsx_vsrari_w::<30>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsrari_w::<30>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -226,7 +275,7 @@ unsafe fn test_lsx_vsrari_d() { let a = i64x2::new(-2795326946470057100, 6746045132217841338); let r = i64x2::new(-174707934154378569, 421627820763615084); - assert_eq!(r, transmute(lsx_vsrari_d::<4>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsrari_d::<4>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -239,7 +288,10 @@ unsafe fn test_lsx_vsrl_b() { ); let r = i64x2::new(1300161376517358116, 72917012339034650); - assert_eq!(r, transmute(lsx_vsrl_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrl_b(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -248,7 +300,10 @@ unsafe fn test_lsx_vsrl_h() { let b = i16x8::new(16605, -13577, -26644, -17739, 11000, -29283, -15971, 20169); let r = i64x2::new(468374382728249347, 20829178341621860); - assert_eq!(r, transmute(lsx_vsrl_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrl_h(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -257,7 +312,10 @@ unsafe fn test_lsx_vsrl_w() { let b = i32x4::new(1777885221, -1725401090, 1849724045, -1051851102); let r = i64x2::new(12953227061, 1599606693325790121); - assert_eq!(r, transmute(lsx_vsrl_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrl_w(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -266,7 +324,10 @@ unsafe fn test_lsx_vsrl_d() { let b = i64x2::new(-7903128394835365398, 7601347629202818185); let r = i64x2::new(649044, 1572171616025062); - assert_eq!(r, transmute(lsx_vsrl_d(transmute(a), transmute(b)))); + assert_eq!( + r, + 
transmute(lsx_vsrl_d(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -276,7 +337,7 @@ unsafe fn test_lsx_vsrli_b() { ); let r = i64x2::new(1952909805632365845, 3971107439766933248); - assert_eq!(r, transmute(lsx_vsrli_b::<2>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsrli_b::<2>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -284,7 +345,7 @@ unsafe fn test_lsx_vsrli_h() { let a = i16x8::new(29545, 354, 27695, 20915, -32766, -24491, 10641, 20310); let r = i64x2::new(11259230996660281, 10977609996304448); - assert_eq!(r, transmute(lsx_vsrli_h::<9>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsrli_h::<9>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -292,7 +353,7 @@ unsafe fn test_lsx_vsrli_w() { let a = i32x4::new(627703601, 922874410, -234412645, -1216101872); let r = i64x2::new(3870813506329215, 12913695352717769); - assert_eq!(r, transmute(lsx_vsrli_w::<10>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsrli_w::<10>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -300,7 +361,7 @@ unsafe fn test_lsx_vsrli_d() { let a = i64x2::new(1407685950714554203, -6076144426076800688); let r = i64x2::new(9, 85); - assert_eq!(r, transmute(lsx_vsrli_d::<57>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsrli_d::<57>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -313,7 +374,13 @@ unsafe fn test_lsx_vsrlr_b() { ); let r = i64x2::new(3317746744565237249, 144420860932066826); - assert_eq!(r, transmute(lsx_vsrlr_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrlr_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -322,7 +389,13 @@ unsafe fn test_lsx_vsrlr_h() { let b = i16x8::new(19500, -26403, -1282, 12290, -18989, 25105, -24347, 6707); let r = i64x2::new(1991716935204929539, 311033695131730530); - assert_eq!(r, transmute(lsx_vsrlr_h(transmute(a), transmute(b)))); + assert_eq!( + r, + 
transmute(lsx_vsrlr_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -331,7 +404,13 @@ unsafe fn test_lsx_vsrlr_w() { let b = i32x4::new(1830015593, -1452673200, 962662328, -252736055); let r = i64x2::new(7864089021084, 20473000998469780); - assert_eq!(r, transmute(lsx_vsrlr_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrlr_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -340,7 +419,13 @@ unsafe fn test_lsx_vsrlr_d() { let b = i64x2::new(-1543621369665313706, 8544381131364512650); let r = i64x2::new(1428972826343, 4256393046182047); - assert_eq!(r, transmute(lsx_vsrlr_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrlr_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -350,7 +435,7 @@ unsafe fn test_lsx_vsrlri_b() { ); let r = i64x2::new(93866580842851436, 1896906350202744602); - assert_eq!(r, transmute(lsx_vsrlri_b::<1>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsrlri_b::<1>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -358,7 +443,7 @@ unsafe fn test_lsx_vsrlri_h() { let a = i16x8::new(-18045, 1968, 22966, 3692, 2010, -17108, 3373, -30706); let r = i64x2::new(1039304252363684227, -8642956144778934310); - assert_eq!(r, transmute(lsx_vsrlri_h::<0>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsrlri_h::<0>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -366,7 +451,7 @@ unsafe fn test_lsx_vsrlri_w() { let a = i32x4::new(1306456564, -1401620667, -839707416, -1634862919); let r = i64x2::new(1553353645217275455, 1428132662790218397); - assert_eq!(r, transmute(lsx_vsrlri_w::<3>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsrlri_w::<3>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -374,7 +459,7 @@ unsafe fn test_lsx_vsrlri_d() { let a = i64x2::new(-3683179565838693027, 6160461828074490983); let r = i64x2::new(205, 85); - 
assert_eq!(r, transmute(lsx_vsrlri_d::<56>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsrlri_d::<56>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -387,7 +472,13 @@ unsafe fn test_lsx_vbitclr_b() { ); let r = i64x2::new(-7325372782311046420, -5316383129963115396); - assert_eq!(r, transmute(lsx_vbitclr_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vbitclr_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -396,7 +487,13 @@ unsafe fn test_lsx_vbitclr_h() { let b = u16x8::new(26587, 57597, 34751, 38678, 23919, 45729, 62569, 5978); let r = i64x2::new(-5495443997997256700, -3317648531059028099); - assert_eq!(r, transmute(lsx_vbitclr_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vbitclr_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -405,7 +502,13 @@ unsafe fn test_lsx_vbitclr_w() { let b = u32x4::new(1968231094, 2827365864, 4097273355, 4016923215); let r = i64x2::new(-7626667807832507452, 546969093373761021); - assert_eq!(r, transmute(lsx_vbitclr_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vbitclr_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -414,7 +517,13 @@ unsafe fn test_lsx_vbitclr_d() { let b = u64x2::new(5723204188033770667, 2981956604140378920); let r = i64x2::new(-1242851545812588193, -5509634528458855560); - assert_eq!(r, transmute(lsx_vbitclr_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vbitclr_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -424,7 +533,7 @@ unsafe fn test_lsx_vbitclri_b() { ); let r = i64x2::new(7503621968728299154, -6865556469255070542); - assert_eq!(r, transmute(lsx_vbitclri_b::<0>(transmute(a)))); + assert_eq!(r, transmute(lsx_vbitclri_b::<0>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -432,7 +541,7 @@ 
unsafe fn test_lsx_vbitclri_h() { let a = u16x8::new(17366, 58985, 22108, 45942, 27326, 19605, 9632, 32322); let r = i64x2::new(-5515130134779575338, 8809640793386347198); - assert_eq!(r, transmute(lsx_vbitclri_h::<10>(transmute(a)))); + assert_eq!(r, transmute(lsx_vbitclri_h::<10>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -440,7 +549,7 @@ unsafe fn test_lsx_vbitclri_w() { let a = u32x4::new(718858183, 3771164920, 1842485081, 896350597); let r = i64x2::new(-2249714073768237625, 3849796501707560281); - assert_eq!(r, transmute(lsx_vbitclri_w::<9>(transmute(a)))); + assert_eq!(r, transmute(lsx_vbitclri_w::<9>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -448,7 +557,7 @@ unsafe fn test_lsx_vbitclri_d() { let a = u64x2::new(10838658690401820648, 3833745076866321369); let r = i64x2::new(-7608085933063544856, 3833744527110507481); - assert_eq!(r, transmute(lsx_vbitclri_d::<39>(transmute(a)))); + assert_eq!(r, transmute(lsx_vbitclri_d::<39>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -461,7 +570,13 @@ unsafe fn test_lsx_vbitset_b() { ); let r = i64x2::new(-7941579666116909337, -8620998056061183460); - assert_eq!(r, transmute(lsx_vbitset_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vbitset_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -470,7 +585,13 @@ unsafe fn test_lsx_vbitset_h() { let b = u16x8::new(64512, 23847, 57770, 47705, 8024, 31966, 14493, 50266); let r = i64x2::new(8218739538452480967, 9190693790629616954); - assert_eq!(r, transmute(lsx_vbitset_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vbitset_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -479,7 +600,13 @@ unsafe fn test_lsx_vbitset_w() { let b = u32x4::new(3259082048, 1303228302, 1429001720, 209615081); let r = i64x2::new(5472281065241838073, -4235320193476931022); - assert_eq!(r, 
transmute(lsx_vbitset_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vbitset_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -488,7 +615,13 @@ unsafe fn test_lsx_vbitset_d() { let b = u64x2::new(12687331714071910183, 1753585392879336372); let r = i64x2::new(8117422612773760492, 5031452210401715131); - assert_eq!(r, transmute(lsx_vbitset_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vbitset_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -498,7 +631,7 @@ unsafe fn test_lsx_vbitseti_b() { ); let r = i64x2::new(6185254145054243811, 5860546440891134157); - assert_eq!(r, transmute(lsx_vbitseti_b::<6>(transmute(a)))); + assert_eq!(r, transmute(lsx_vbitseti_b::<6>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -506,7 +639,7 @@ unsafe fn test_lsx_vbitseti_h() { let a = u16x8::new(15222, 59961, 52253, 2908, 61562, 41309, 63627, 4191); let r = i64x2::new(819316619673811830, 1179934905985921146); - assert_eq!(r, transmute(lsx_vbitseti_h::<1>(transmute(a)))); + assert_eq!(r, transmute(lsx_vbitseti_h::<1>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -514,7 +647,7 @@ unsafe fn test_lsx_vbitseti_w() { let a = u32x4::new(3788412756, 1863556832, 1913138259, 1199998627); let r = i64x2::new(8012922850722617172, 5162962059379878995); - assert_eq!(r, transmute(lsx_vbitseti_w::<21>(transmute(a)))); + assert_eq!(r, transmute(lsx_vbitseti_w::<21>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -522,7 +655,7 @@ unsafe fn test_lsx_vbitseti_d() { let a = u64x2::new(10744510173660993785, 16946223211744108759); let r = i64x2::new(-7702233900048557831, -1500520861831225129); - assert_eq!(r, transmute(lsx_vbitseti_d::<27>(transmute(a)))); + assert_eq!(r, transmute(lsx_vbitseti_d::<27>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -535,7 +668,13 @@ unsafe fn test_lsx_vbitrev_b() { ); let r = 
i64x2::new(7553563628828981794, -3550669970358088907); - assert_eq!(r, transmute(lsx_vbitrev_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vbitrev_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -544,7 +683,13 @@ unsafe fn test_lsx_vbitrev_h() { let b = u16x8::new(21347, 23131, 57157, 13786, 34463, 33445, 23964, 48087); let r = i64x2::new(-2253077037977362312, -1686202867067838120); - assert_eq!(r, transmute(lsx_vbitrev_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vbitrev_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -553,7 +698,13 @@ unsafe fn test_lsx_vbitrev_w() { let b = u32x4::new(3330530584, 4153020036, 822570638, 2652744506); let r = i64x2::new(4583672484591007782, 3195058299616182309); - assert_eq!(r, transmute(lsx_vbitrev_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vbitrev_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -562,7 +713,13 @@ unsafe fn test_lsx_vbitrev_d() { let b = u64x2::new(10942298949673565895, 12884740754463765660); let r = i64x2::new(-2430080033105247697, -384636561250515393); - assert_eq!(r, transmute(lsx_vbitrev_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vbitrev_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -572,7 +729,7 @@ unsafe fn test_lsx_vbitrevi_b() { ); let r = i64x2::new(8727320563398842300, 7658903196653594166); - assert_eq!(r, transmute(lsx_vbitrevi_b::<2>(transmute(a)))); + assert_eq!(r, transmute(lsx_vbitrevi_b::<2>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -580,7 +737,7 @@ unsafe fn test_lsx_vbitrevi_h() { let a = u16x8::new(15083, 24599, 61212, 12408, 48399, 59833, 45416, 58826); let r = i64x2::new(8104420064785562347, -6500117680329458417); - assert_eq!(r, 
transmute(lsx_vbitrevi_h::<14>(transmute(a)))); + assert_eq!(r, transmute(lsx_vbitrevi_h::<14>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -588,7 +745,7 @@ unsafe fn test_lsx_vbitrevi_w() { let a = u32x4::new(1200613355, 1418062686, 3847355950, 3312937419); let r = i64x2::new(6099540060505368555, -4226793400815190482); - assert_eq!(r, transmute(lsx_vbitrevi_w::<21>(transmute(a)))); + assert_eq!(r, transmute(lsx_vbitrevi_w::<21>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -596,7 +753,7 @@ unsafe fn test_lsx_vbitrevi_d() { let a = u64x2::new(295858379748270823, 1326723086853575042); let r = i64x2::new(295858379748254439, 1326723086853591426); - assert_eq!(r, transmute(lsx_vbitrevi_d::<14>(transmute(a)))); + assert_eq!(r, transmute(lsx_vbitrevi_d::<14>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -609,7 +766,10 @@ unsafe fn test_lsx_vadd_b() { ); let r = i64x2::new(5228548393274527852, 1107461330348121713); - assert_eq!(r, transmute(lsx_vadd_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vadd_b(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -618,7 +778,10 @@ unsafe fn test_lsx_vadd_h() { let b = i16x8::new(-25040, 15453, -28080, -31322, -24429, -12453, -18073, 27019); let r = i64x2::new(1938006946753467667, 3264410328302682781); - assert_eq!(r, transmute(lsx_vadd_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vadd_h(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -627,7 +790,10 @@ unsafe fn test_lsx_vadd_w() { let b = i32x4::new(-1169804484, 389773725, -731843701, -1825112934); let r = i64x2::new(-2841313158179161935, -1386205072290870384); - assert_eq!(r, transmute(lsx_vadd_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vadd_w(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -636,7 +802,10 @@ unsafe fn test_lsx_vadd_d() { let b = 
i64x2::new(7093939531558864473, 4047047970310912233); let r = i64x2::new(-204689461315224217, -5456447511965942904); - assert_eq!(r, transmute(lsx_vadd_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vadd_d(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -646,7 +815,7 @@ unsafe fn test_lsx_vaddi_bu() { ); let r = i64x2::new(-7790681010872578420, 298548864442153210); - assert_eq!(r, transmute(lsx_vaddi_bu::<10>(transmute(a)))); + assert_eq!(r, transmute(lsx_vaddi_bu::<10>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -654,7 +823,7 @@ unsafe fn test_lsx_vaddi_hu() { let a = i16x8::new(-16986, -28417, 11657, 16608, -30167, 18602, 8897, -854); let r = i64x2::new(4681541984598867390, -233585914045887935); - assert_eq!(r, transmute(lsx_vaddi_hu::<24>(transmute(a)))); + assert_eq!(r, transmute(lsx_vaddi_hu::<24>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -662,7 +831,7 @@ unsafe fn test_lsx_vaddi_wu() { let a = i32x4::new(1142343549, 56714754, -180143297, 408668191); let r = i64x2::new(243588023362963327, 1755216527965240129); - assert_eq!(r, transmute(lsx_vaddi_wu::<2>(transmute(a)))); + assert_eq!(r, transmute(lsx_vaddi_wu::<2>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -670,7 +839,7 @@ unsafe fn test_lsx_vaddi_du() { let a = i64x2::new(4516502893749962130, 9158051921593642947); let r = i64x2::new(4516502893749962139, 9158051921593642956); - assert_eq!(r, transmute(lsx_vaddi_du::<9>(transmute(a)))); + assert_eq!(r, transmute(lsx_vaddi_du::<9>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -683,7 +852,10 @@ unsafe fn test_lsx_vsub_b() { ); let r = i64x2::new(-4051929421319416371, 8737463450488952169); - assert_eq!(r, transmute(lsx_vsub_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsub_b(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -692,7 +864,10 @@ unsafe fn test_lsx_vsub_h() 
{ let b = i16x8::new(15368, 16207, 9677, 21447, -29583, -22036, 1845, 15671); let r = i64x2::new(-913983189443969573, 2742472381424198215); - assert_eq!(r, transmute(lsx_vsub_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsub_h(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -701,7 +876,10 @@ unsafe fn test_lsx_vsub_w() { let b = i32x4::new(617176389, -1376778690, 1463940361, 620446698); let r = i64x2::new(-7247543435452521192, -8067077040042720878); - assert_eq!(r, transmute(lsx_vsub_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsub_w(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -710,7 +888,10 @@ unsafe fn test_lsx_vsub_d() { let b = i64x2::new(1314101702815749241, 7673634401554993450); let r = i64x2::new(5925090640479842026, 5645651807574135757); - assert_eq!(r, transmute(lsx_vsub_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsub_d(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -720,7 +901,7 @@ unsafe fn test_lsx_vsubi_bu() { ); let r = i64x2::new(-8192169673836457574, 4758493248402185941); - assert_eq!(r, transmute(lsx_vsubi_bu::<19>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsubi_bu::<19>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -728,7 +909,7 @@ unsafe fn test_lsx_vsubi_hu() { let a = i16x8::new(13272, -26858, -235, 16054, 29698, 1377, 4604, -3878); let r = i64x2::new(4514576075959186376, -1096043853912116238); - assert_eq!(r, transmute(lsx_vsubi_hu::<16>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsubi_hu::<16>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -736,7 +917,7 @@ unsafe fn test_lsx_vsubi_wu() { let a = i32x4::new(1277091145, -2076591216, -1523555105, -945754023); let r = i64x2::new(-8918891362898748088, -4061982600368986914); - assert_eq!(r, transmute(lsx_vsubi_wu::<1>(transmute(a)))); + assert_eq!(r, 
transmute(lsx_vsubi_wu::<1>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -744,7 +925,7 @@ unsafe fn test_lsx_vsubi_du() { let a = i64x2::new(-8248876128472283209, -2119651236628000925); let r = i64x2::new(-8248876128472283234, -2119651236628000950); - assert_eq!(r, transmute(lsx_vsubi_du::<25>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsubi_du::<25>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -757,7 +938,10 @@ unsafe fn test_lsx_vmax_b() { ); let r = i64x2::new(1260734548147228113, 7591133008682590587); - assert_eq!(r, transmute(lsx_vmax_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmax_b(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -766,7 +950,10 @@ unsafe fn test_lsx_vmax_h() { let b = i16x8::new(25637, -11569, -23103, 6983, -17125, 5183, -709, 5986); let r = i64x2::new(1965654441534120997, 1684966995419662474); - assert_eq!(r, transmute(lsx_vmax_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmax_h(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -775,7 +962,10 @@ unsafe fn test_lsx_vmax_w() { let b = i32x4::new(643859790, -389733899, -1309288060, 1934346522); let r = i64x2::new(-1673894349703707314, 8307955054730158361); - assert_eq!(r, transmute(lsx_vmax_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmax_w(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -784,7 +974,10 @@ unsafe fn test_lsx_vmax_d() { let b = i64x2::new(-6137495199657896371, 2160025776787809810); let r = i64x2::new(-990960773872867733, 6406870358170165030); - assert_eq!(r, transmute(lsx_vmax_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmax_d(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -794,7 +987,7 @@ unsafe fn test_lsx_vmaxi_b() { ); let r = i64x2::new(5908253215318699518, 1728939149412407162); 
- assert_eq!(r, transmute(lsx_vmaxi_b::<-2>(transmute(a)))); + assert_eq!(r, transmute(lsx_vmaxi_b::<-2>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -802,7 +995,7 @@ unsafe fn test_lsx_vmaxi_h() { let a = i16x8::new(-14059, 19536, 15816, 28251, 23079, -10486, -11781, 25565); let r = i64x2::new(7952017497535807498, 7195907822558272039); - assert_eq!(r, transmute(lsx_vmaxi_h::<10>(transmute(a)))); + assert_eq!(r, transmute(lsx_vmaxi_h::<10>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -810,7 +1003,7 @@ unsafe fn test_lsx_vmaxi_w() { let a = i32x4::new(-1136628686, -168033999, -2082324641, -1789957469); let r = i64x2::new(55834574861, 55834574861); - assert_eq!(r, transmute(lsx_vmaxi_w::<13>(transmute(a)))); + assert_eq!(r, transmute(lsx_vmaxi_w::<13>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -818,7 +1011,7 @@ unsafe fn test_lsx_vmaxi_d() { let a = i64x2::new(-490958606840895025, -602287987736508723); let r = i64x2::new(-5, -5); - assert_eq!(r, transmute(lsx_vmaxi_d::<-5>(transmute(a)))); + assert_eq!(r, transmute(lsx_vmaxi_d::<-5>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -831,7 +1024,13 @@ unsafe fn test_lsx_vmax_bu() { ); let r = i64x2::new(-5712542810735052010, 4588590651995571688); - assert_eq!(r, transmute(lsx_vmax_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmax_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -840,7 +1039,13 @@ unsafe fn test_lsx_vmax_hu() { let b = u16x8::new(61508, 27224, 11696, 15294, 30725, 4809, 55995, 24012); let r = i64x2::new(6366821095949791300, 6759017637785204741); - assert_eq!(r, transmute(lsx_vmax_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmax_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -849,7 +1054,13 @@ unsafe fn test_lsx_vmax_wu() { let b = u32x4::new(2856502284, 546582019, 3814541188, 
2370198139); let r = i64x2::new(2347551899043152908, -8266820577849948284); - assert_eq!(r, transmute(lsx_vmax_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmax_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -858,7 +1069,13 @@ unsafe fn test_lsx_vmax_du() { let b = u64x2::new(15559502733477870114, 3537017767853389449); let r = i64x2::new(-1341110034690820781, -6520089917898609068); - assert_eq!(r, transmute(lsx_vmax_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmax_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -868,7 +1085,7 @@ unsafe fn test_lsx_vmaxi_bu() { ); let r = i64x2::new(-1045930669804428840, -8076220938123067729); - assert_eq!(r, transmute(lsx_vmaxi_bu::<27>(transmute(a)))); + assert_eq!(r, transmute(lsx_vmaxi_bu::<27>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -876,7 +1093,7 @@ unsafe fn test_lsx_vmaxi_hu() { let a = u16x8::new(56394, 18974, 59, 64239, 15178, 38205, 20044, 21066); let r = i64x2::new(-365072790147113910, 5929637950214978378); - assert_eq!(r, transmute(lsx_vmaxi_hu::<23>(transmute(a)))); + assert_eq!(r, transmute(lsx_vmaxi_hu::<23>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -884,7 +1101,7 @@ unsafe fn test_lsx_vmaxi_wu() { let a = u32x4::new(2234002286, 3837532269, 3218694441, 2956128392); let r = i64x2::new(-1964668478775874706, -5750269304073789143); - assert_eq!(r, transmute(lsx_vmaxi_wu::<15>(transmute(a)))); + assert_eq!(r, transmute(lsx_vmaxi_wu::<15>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -892,7 +1109,7 @@ unsafe fn test_lsx_vmaxi_du() { let a = u64x2::new(3145066433415682744, 697260191203805367); let r = i64x2::new(3145066433415682744, 697260191203805367); - assert_eq!(r, transmute(lsx_vmaxi_du::<15>(transmute(a)))); + assert_eq!(r, transmute(lsx_vmaxi_du::<15>(black_box(transmute(a))))); } #[simd_test(enable = 
"lsx")] @@ -905,7 +1122,10 @@ unsafe fn test_lsx_vmin_b() { ); let r = i64x2::new(1870285769536668398, -8941449826914199819); - assert_eq!(r, transmute(lsx_vmin_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmin_b(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -914,7 +1134,10 @@ unsafe fn test_lsx_vmin_h() { let b = i16x8::new(-5519, 15267, -28304, -5842, 32145, 6582, -9646, -24918); let r = i64x2::new(-1644216902720689551, -7013553423522578637); - assert_eq!(r, transmute(lsx_vmin_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmin_h(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -923,7 +1146,10 @@ unsafe fn test_lsx_vmin_w() { let b = i32x4::new(-425011290, -2104111279, 175390640, 571448257); let r = i64x2::new(-9037089126579775578, 2454351575346593712); - assert_eq!(r, transmute(lsx_vmin_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmin_w(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -932,7 +1158,10 @@ unsafe fn test_lsx_vmin_d() { let b = i64x2::new(7269804448576860985, -2384075780126369706); let r = i64x2::new(5262417572890363865, -2384075780126369706); - assert_eq!(r, transmute(lsx_vmin_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmin_d(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -942,7 +1171,7 @@ unsafe fn test_lsx_vmini_b() { ); let r = i64x2::new(-1187557278141451540, -940475489144045070); - assert_eq!(r, transmute(lsx_vmini_b::<-14>(transmute(a)))); + assert_eq!(r, transmute(lsx_vmini_b::<-14>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -950,7 +1179,7 @@ unsafe fn test_lsx_vmini_h() { let a = i16x8::new(26119, -26421, -26720, 11534, 11181, -13024, -9525, -1565); let r = i64x2::new(-677708916064259, -440267769697468419); - assert_eq!(r, 
transmute(lsx_vmini_h::<-3>(transmute(a)))); + assert_eq!(r, transmute(lsx_vmini_h::<-3>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -958,7 +1187,7 @@ unsafe fn test_lsx_vmini_w() { let a = i32x4::new(1937226480, -56354461, -210581139, 118641668); let r = i64x2::new(-242040566978707451, 25559222637); - assert_eq!(r, transmute(lsx_vmini_w::<5>(transmute(a)))); + assert_eq!(r, transmute(lsx_vmini_w::<5>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -966,7 +1195,7 @@ unsafe fn test_lsx_vmini_d() { let a = i64x2::new(-6839357499730806877, 2982085289136510651); let r = i64x2::new(-6839357499730806877, 11); - assert_eq!(r, transmute(lsx_vmini_d::<11>(transmute(a)))); + assert_eq!(r, transmute(lsx_vmini_d::<11>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -979,7 +1208,13 @@ unsafe fn test_lsx_vmin_bu() { ); let r = i64x2::new(3617816997909406996, 4784078933357220137); - assert_eq!(r, transmute(lsx_vmin_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmin_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -988,7 +1223,13 @@ unsafe fn test_lsx_vmin_hu() { let b = u16x8::new(30424, 14541, 7654, 46014, 42452, 14971, 14903, 13871); let r = i64x2::new(-5494921620712753448, 3904403410832303572); - assert_eq!(r, transmute(lsx_vmin_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmin_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -997,7 +1238,13 @@ unsafe fn test_lsx_vmin_wu() { let b = u32x4::new(1456829356, 2264966310, 1587887390, 645429404); let r = i64x2::new(-8718787844260924500, 2772098183187911585); - assert_eq!(r, transmute(lsx_vmin_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmin_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1006,7 +1253,13 @@ unsafe fn test_lsx_vmin_du() { let b = 
u64x2::new(15079551366517035256, 13891052596545854864); let r = i64x2::new(6641707046382446478, 5750385968612732680); - assert_eq!(r, transmute(lsx_vmin_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmin_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1016,7 +1269,7 @@ unsafe fn test_lsx_vmini_bu() { ); let r = i64x2::new(361700864190383365, 361700864190317829); - assert_eq!(r, transmute(lsx_vmini_bu::<5>(transmute(a)))); + assert_eq!(r, transmute(lsx_vmini_bu::<5>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1024,7 +1277,7 @@ unsafe fn test_lsx_vmini_hu() { let a = u16x8::new(51791, 41830, 16737, 31634, 36341, 58491, 48701, 8690); let r = i64x2::new(5066626891382802, 5066626891382802); - assert_eq!(r, transmute(lsx_vmini_hu::<18>(transmute(a)))); + assert_eq!(r, transmute(lsx_vmini_hu::<18>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1032,7 +1285,7 @@ unsafe fn test_lsx_vmini_wu() { let a = u32x4::new(1158888991, 2639721369, 556001789, 2902942998); let r = i64x2::new(77309411346, 77309411346); - assert_eq!(r, transmute(lsx_vmini_wu::<18>(transmute(a)))); + assert_eq!(r, transmute(lsx_vmini_wu::<18>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1040,7 +1293,7 @@ unsafe fn test_lsx_vmini_du() { let a = u64x2::new(17903595768445663391, 13119300660970895532); let r = i64x2::new(13, 13); - assert_eq!(r, transmute(lsx_vmini_du::<13>(transmute(a)))); + assert_eq!(r, transmute(lsx_vmini_du::<13>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1053,7 +1306,10 @@ unsafe fn test_lsx_vseq_b() { ); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vseq_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vseq_b(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1062,7 +1318,10 @@ unsafe fn test_lsx_vseq_h() { let b = i16x8::new(-7387, -24074, 15709, -4629, 30465, -9504, 
-21403, -30287); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vseq_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vseq_h(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1071,7 +1330,10 @@ unsafe fn test_lsx_vseq_w() { let b = i32x4::new(-493722413, -522973881, -1254416384, -884207273); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vseq_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vseq_w(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1080,7 +1342,10 @@ unsafe fn test_lsx_vseq_d() { let b = i64x2::new(3023654898382436999, 1783520577741396523); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vseq_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vseq_d(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1090,7 +1355,7 @@ unsafe fn test_lsx_vseqi_b() { ); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vseqi_b::<12>(transmute(a)))); + assert_eq!(r, transmute(lsx_vseqi_b::<12>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1098,7 +1363,7 @@ unsafe fn test_lsx_vseqi_h() { let a = i16x8::new(-3205, 25452, 20774, 22065, -8424, 16590, -15971, -14154); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vseqi_h::<-1>(transmute(a)))); + assert_eq!(r, transmute(lsx_vseqi_h::<-1>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1106,7 +1371,7 @@ unsafe fn test_lsx_vseqi_w() { let a = i32x4::new(199798215, -798304779, -1812193878, -1830438161); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vseqi_w::<11>(transmute(a)))); + assert_eq!(r, transmute(lsx_vseqi_w::<11>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1114,7 +1379,7 @@ unsafe fn test_lsx_vseqi_d() { let a = i64x2::new(-7376858177879278972, 1947027764115386661); let r = i64x2::new(0, 0); - assert_eq!(r, 
transmute(lsx_vseqi_d::<3>(transmute(a)))); + assert_eq!(r, transmute(lsx_vseqi_d::<3>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1124,7 +1389,7 @@ unsafe fn test_lsx_vslti_b() { ); let r = i64x2::new(-1099511627776, 1095216660480); - assert_eq!(r, transmute(lsx_vslti_b::<-4>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslti_b::<-4>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1137,7 +1402,10 @@ unsafe fn test_lsx_vslt_b() { ); let r = i64x2::new(-72056494526365441, -280375465148416); - assert_eq!(r, transmute(lsx_vslt_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vslt_b(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1146,7 +1414,10 @@ unsafe fn test_lsx_vslt_h() { let b = i16x8::new(-10624, 12762, 31216, 13253, 2299, -12591, -8652, -22348); let r = i64x2::new(-4294967296, 65535); - assert_eq!(r, transmute(lsx_vslt_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vslt_h(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1155,7 +1426,10 @@ unsafe fn test_lsx_vslt_w() { let b = i32x4::new(-1849021639, -756143028, 54274044, 646446450); let r = i64x2::new(-4294967296, -1); - assert_eq!(r, transmute(lsx_vslt_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vslt_w(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1164,7 +1438,10 @@ unsafe fn test_lsx_vslt_d() { let b = i64x2::new(1481173131774551907, 270656941607020532); let r = i64x2::new(-1, 0); - assert_eq!(r, transmute(lsx_vslt_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vslt_d(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1172,7 +1449,7 @@ unsafe fn test_lsx_vslti_h() { let a = i16x8::new(-8902, 5527, 17224, -27356, 4424, 28839, 29975, 18805); let r = i64x2::new(-281474976645121, 0); - assert_eq!(r, 
transmute(lsx_vslti_h::<14>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslti_h::<14>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1180,7 +1457,7 @@ unsafe fn test_lsx_vslti_w() { let a = i32x4::new(995282502, -1964668207, -996118772, 1812234755); let r = i64x2::new(-4294967296, 4294967295); - assert_eq!(r, transmute(lsx_vslti_w::<14>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslti_w::<14>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1188,7 +1465,7 @@ unsafe fn test_lsx_vslti_d() { let a = i64x2::new(1441753618400573134, 3878439049744730841); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vslti_d::<14>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslti_d::<14>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1201,7 +1478,13 @@ unsafe fn test_lsx_vslt_bu() { ); let r = i64x2::new(-281474959998721, -72057589742960896); - assert_eq!(r, transmute(lsx_vslt_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vslt_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1210,7 +1493,13 @@ unsafe fn test_lsx_vslt_hu() { let b = u16x8::new(513, 13075, 20319, 44422, 12609, 18638, 20227, 21354); let r = i64x2::new(281474976645120, -281474976645121); - assert_eq!(r, transmute(lsx_vslt_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vslt_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1219,7 +1508,13 @@ unsafe fn test_lsx_vslt_wu() { let b = u32x4::new(1402243125, 1129899238, 2591537060, 4152171743); let r = i64x2::new(4294967295, -1); - assert_eq!(r, transmute(lsx_vslt_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vslt_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1228,7 +1523,13 @@ unsafe fn test_lsx_vslt_du() { let b = u64x2::new(835355141719377733, 10472626544222695938); let r = i64x2::new(0, 
0); - assert_eq!(r, transmute(lsx_vslt_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vslt_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1238,7 +1539,7 @@ unsafe fn test_lsx_vslti_bu() { ); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vslti_bu::<7>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslti_bu::<7>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1246,7 +1547,7 @@ unsafe fn test_lsx_vslti_hu() { let a = u16x8::new(60550, 12178, 30950, 44771, 25514, 35987, 55940, 21614); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vslti_hu::<2>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslti_hu::<2>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1254,7 +1555,7 @@ unsafe fn test_lsx_vslti_wu() { let a = u32x4::new(912580668, 18660032, 3405726641, 4033549497); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vslti_wu::<8>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslti_wu::<8>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1262,7 +1563,7 @@ unsafe fn test_lsx_vslti_du() { let a = u64x2::new(17196150830761730262, 5893061291971214149); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vslti_du::<14>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslti_du::<14>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1275,7 +1576,10 @@ unsafe fn test_lsx_vsle_b() { ); let r = i64x2::new(281470681808895, 280375465148415); - assert_eq!(r, transmute(lsx_vsle_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsle_b(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1284,7 +1588,10 @@ unsafe fn test_lsx_vsle_h() { let b = i16x8::new(-30602, -9535, 10944, 3343, -1093, 6600, -19453, -4561); let r = i64x2::new(281470681743360, -281470681808896); - assert_eq!(r, transmute(lsx_vsle_h(transmute(a), transmute(b)))); + assert_eq!( + r, + 
transmute(lsx_vsle_h(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1293,7 +1600,10 @@ unsafe fn test_lsx_vsle_w() { let b = i32x4::new(-1810853975, 2021418524, 215198844, 1124361386); let r = i64x2::new(-4294967296, -4294967296); - assert_eq!(r, transmute(lsx_vsle_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsle_w(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1302,7 +1612,10 @@ unsafe fn test_lsx_vsle_d() { let b = i64x2::new(71694374951002423, -4307912969104303925); let r = i64x2::new(-1, 0); - assert_eq!(r, transmute(lsx_vsle_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsle_d(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1312,7 +1625,7 @@ unsafe fn test_lsx_vslei_b() { ); let r = i64x2::new(72056494526365440, 280375465082880); - assert_eq!(r, transmute(lsx_vslei_b::<3>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslei_b::<3>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1320,7 +1633,7 @@ unsafe fn test_lsx_vslei_h() { let a = i16x8::new(31276, -16628, -30006, -20587, 2104, -30062, 18261, -6449); let r = i64x2::new(-65536, -281470681808896); - assert_eq!(r, transmute(lsx_vslei_h::<-3>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslei_h::<-3>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1328,7 +1641,7 @@ unsafe fn test_lsx_vslei_w() { let a = i32x4::new(-1890390435, 1289536678, 1490122113, 2120063492); let r = i64x2::new(4294967295, 0); - assert_eq!(r, transmute(lsx_vslei_w::<-16>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslei_w::<-16>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1336,7 +1649,7 @@ unsafe fn test_lsx_vslei_d() { let a = i64x2::new(-123539898448811963, 8007480165241051883); let r = i64x2::new(-1, 0); - assert_eq!(r, transmute(lsx_vslei_d::<8>(transmute(a)))); + assert_eq!(r, 
transmute(lsx_vslei_d::<8>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1349,7 +1662,13 @@ unsafe fn test_lsx_vsle_bu() { ); let r = i64x2::new(1095216660480, 72057594021150720); - assert_eq!(r, transmute(lsx_vsle_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsle_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1358,7 +1677,13 @@ unsafe fn test_lsx_vsle_hu() { let b = u16x8::new(50529, 35111, 24746, 62465, 21587, 30574, 11054, 11653); let r = i64x2::new(-4294967296, 281474976710655); - assert_eq!(r, transmute(lsx_vsle_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsle_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1367,7 +1692,13 @@ unsafe fn test_lsx_vsle_wu() { let b = u32x4::new(1321018603, 1091195011, 3525236625, 4061062671); let r = i64x2::new(0, -1); - assert_eq!(r, transmute(lsx_vsle_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsle_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1376,7 +1707,13 @@ unsafe fn test_lsx_vsle_du() { let b = u64x2::new(16044633718831874991, 3531311371811276914); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vsle_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsle_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1386,7 +1723,7 @@ unsafe fn test_lsx_vslei_bu() { ); let r = i64x2::new(71776119061217280, 280375465082880); - assert_eq!(r, transmute(lsx_vslei_bu::<18>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslei_bu::<18>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1394,7 +1731,7 @@ unsafe fn test_lsx_vslei_hu() { let a = u16x8::new(1430, 10053, 35528, 28458, 2394, 22098, 40236, 20853); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vslei_hu::<10>(transmute(a)))); + 
assert_eq!(r, transmute(lsx_vslei_hu::<10>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1402,7 +1739,7 @@ unsafe fn test_lsx_vslei_wu() { let a = u32x4::new(3289026584, 3653636092, 2919866047, 2895662832); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vslei_wu::<2>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslei_wu::<2>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1410,7 +1747,7 @@ unsafe fn test_lsx_vslei_du() { let a = u64x2::new(17462377852989253439, 17741928456729041079); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vslei_du::<12>(transmute(a)))); + assert_eq!(r, transmute(lsx_vslei_du::<12>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1420,7 +1757,7 @@ unsafe fn test_lsx_vsat_b() { ); let r = i64x2::new(-2964542792447819074, 3186937137643144200); - assert_eq!(r, transmute(lsx_vsat_b::<7>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsat_b::<7>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1428,7 +1765,7 @@ unsafe fn test_lsx_vsat_h() { let a = i16x8::new(-22234, -8008, -23350, 13768, 26313, -27447, -3569, 6025); let r = i64x2::new(576451960371214336, 576451960371152895); - assert_eq!(r, transmute(lsx_vsat_h::<11>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsat_h::<11>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1436,7 +1773,7 @@ unsafe fn test_lsx_vsat_w() { let a = i32x4::new(-84179653, 874415975, 1823119516, 1667850968); let r = i64x2::new(137438953440, 133143986207); - assert_eq!(r, transmute(lsx_vsat_w::<5>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsat_w::<5>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1444,7 +1781,7 @@ unsafe fn test_lsx_vsat_d() { let a = i64x2::new(6859869867233872152, 2514172105675226457); let r = i64x2::new(262143, 262143); - assert_eq!(r, transmute(lsx_vsat_d::<18>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsat_d::<18>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ 
-1454,7 +1791,7 @@ unsafe fn test_lsx_vsat_bu() { ); let r = i64x2::new(2125538672170008439, 6577605268441825038); - assert_eq!(r, transmute(lsx_vsat_bu::<6>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsat_bu::<6>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1462,7 +1799,7 @@ unsafe fn test_lsx_vsat_hu() { let a = u16x8::new(36681, 34219, 6160, 8687, 4544, 20195, 35034, 916); let r = i64x2::new(287953294993589247, 257835472485549055); - assert_eq!(r, transmute(lsx_vsat_hu::<9>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsat_hu::<9>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1470,7 +1807,7 @@ unsafe fn test_lsx_vsat_wu() { let a = u32x4::new(1758000759, 4138051566, 2705324001, 3927640324); let r = i64x2::new(70364449226751, 70364449226751); - assert_eq!(r, transmute(lsx_vsat_wu::<13>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsat_wu::<13>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1478,7 +1815,7 @@ unsafe fn test_lsx_vsat_du() { let a = u64x2::new(1953136817312581670, 2606878300382729363); let r = i64x2::new(9007199254740991, 9007199254740991); - assert_eq!(r, transmute(lsx_vsat_du::<52>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsat_du::<52>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -1491,7 +1828,13 @@ unsafe fn test_lsx_vadda_b() { ); let r = i64x2::new(8248499858970022011, 8535863472581999270); - assert_eq!(r, transmute(lsx_vadda_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vadda_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1500,7 +1843,13 @@ unsafe fn test_lsx_vadda_h() { let b = i16x8::new(-21543, 21720, 14529, -19143, -28953, 13450, 8037, 29413); let r = i64x2::new(-8646732423142600033, 8924050915627474398); - assert_eq!(r, transmute(lsx_vadda_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vadda_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } 
#[simd_test(enable = "lsx")] @@ -1509,7 +1858,13 @@ unsafe fn test_lsx_vadda_w() { let b = i32x4::new(287041349, 249467792, 312776520, 1314435078); let r = i64x2::new(8345875378983299469, 6092442344252138029); - assert_eq!(r, transmute(lsx_vadda_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vadda_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1518,7 +1873,13 @@ unsafe fn test_lsx_vadda_d() { let b = i64x2::new(-4324432602362661920, 6402427893748093984); let r = i64x2::new(6071741662385212188, -5328622052402301597); - assert_eq!(r, transmute(lsx_vadda_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vadda_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1531,7 +1892,13 @@ unsafe fn test_lsx_vsadd_b() { ); let r = i64x2::new(-3422653801050278697, 1909270979770548186); - assert_eq!(r, transmute(lsx_vsadd_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsadd_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1540,7 +1907,13 @@ unsafe fn test_lsx_vsadd_h() { let b = i16x8::new(26970, 17131, 15547, -7614, -8479, 22338, 3567, -22299); let r = i64x2::new(6720170624686097630, -304244782337649222); - assert_eq!(r, transmute(lsx_vsadd_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsadd_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1549,7 +1922,13 @@ unsafe fn test_lsx_vsadd_w() { let b = i32x4::new(-1026388582, 222487110, 501504960, -1863994162); let r = i64x2::new(-6565289918505943040, -6915373914453178024); - assert_eq!(r, transmute(lsx_vsadd_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsadd_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1558,7 +1937,13 @@ unsafe fn test_lsx_vsadd_d() { let b = 
i64x2::new(-6599608819082608284, -5088169537193133686); let r = i64x2::new(-8567396806692999839, -9223372036854775808); - assert_eq!(r, transmute(lsx_vsadd_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsadd_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1571,7 +1956,13 @@ unsafe fn test_lsx_vsadd_bu() { ); let r = i64x2::new(-5404438145481572386, -7318352348905473); - assert_eq!(r, transmute(lsx_vsadd_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsadd_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1580,7 +1971,13 @@ unsafe fn test_lsx_vsadd_hu() { let b = u16x8::new(31219, 59227, 25607, 62798, 18845, 3238, 19902, 24978); let r = i64x2::new(-8740258447361, -136834913009665); - assert_eq!(r, transmute(lsx_vsadd_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsadd_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1589,7 +1986,13 @@ unsafe fn test_lsx_vsadd_wu() { let b = u32x4::new(3676524021, 3894343575, 904432536, 1616820031); let r = i64x2::new(-1, -7583652642497232897); - assert_eq!(r, transmute(lsx_vsadd_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsadd_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1598,7 +2001,13 @@ unsafe fn test_lsx_vsadd_du() { let b = u64x2::new(11054638512585704882, 3549000132135395099); let r = i64x2::new(-3651327027786652925, -623479558932885349); - assert_eq!(r, transmute(lsx_vsadd_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsadd_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1611,7 +2020,10 @@ unsafe fn test_lsx_vavg_b() { ); let r = i64x2::new(-152206416164856247, 4369276355735447089); - assert_eq!(r, transmute(lsx_vavg_b(transmute(a), 
transmute(b)))); + assert_eq!( + r, + transmute(lsx_vavg_b(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1620,7 +2032,10 @@ unsafe fn test_lsx_vavg_h() { let b = i16x8::new(-3088, -25854, -32552, -8417, 7808, -12495, 22032, -5168); let r = i64x2::new(696836182083297626, -4337760619710117321); - assert_eq!(r, transmute(lsx_vavg_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vavg_h(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1629,7 +2044,10 @@ unsafe fn test_lsx_vavg_w() { let b = i32x4::new(-324844828, -1580060766, -1909832882, 328273785); let r = i64x2::new(475428188150908257, 4521676108535152711); - assert_eq!(r, transmute(lsx_vavg_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vavg_w(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1638,7 +2056,10 @@ unsafe fn test_lsx_vavg_d() { let b = i64x2::new(3169904420607189220, 5159962511251707672); let r = i64x2::new(2328313764472338215, 5669256157716045974); - assert_eq!(r, transmute(lsx_vavg_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vavg_d(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1651,7 +2072,13 @@ unsafe fn test_lsx_vavg_bu() { ); let r = i64x2::new(-5663745084945885565, 2801126043194071837); - assert_eq!(r, transmute(lsx_vavg_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vavg_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1660,7 +2087,13 @@ unsafe fn test_lsx_vavg_hu() { let b = u16x8::new(44835, 36733, 12115, 42874, 4819, 12201, 27397, 25394); let r = i64x2::new(-4196978047981735086, -6439149718662907396); - assert_eq!(r, transmute(lsx_vavg_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vavg_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } 
#[simd_test(enable = "lsx")] @@ -1669,7 +2102,13 @@ unsafe fn test_lsx_vavg_wu() { let b = u32x4::new(160886383, 26081142, 459122380, 2523086630); let r = i64x2::new(123816739188229069, -5586965600173345916); - assert_eq!(r, transmute(lsx_vavg_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vavg_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1678,7 +2117,13 @@ unsafe fn test_lsx_vavg_du() { let b = u64x2::new(9749063966076740681, 5963120178993456389); let r = i64x2::new(-7770235857859936532, 7939635441364553211); - assert_eq!(r, transmute(lsx_vavg_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vavg_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1691,7 +2136,13 @@ unsafe fn test_lsx_vavgr_b() { ); let r = i64x2::new(1883712581662731545, -1226681417271426582); - assert_eq!(r, transmute(lsx_vavgr_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vavgr_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1700,7 +2151,13 @@ unsafe fn test_lsx_vavgr_h() { let b = i16x8::new(-9758, -8332, 20577, 31066, 31120, 14788, -22323, 16722); let r = i64x2::new(3801916629507170613, 3994084079587580569); - assert_eq!(r, transmute(lsx_vavgr_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vavgr_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1709,7 +2166,13 @@ unsafe fn test_lsx_vavgr_w() { let b = i32x4::new(1278058715, -155858446, -195547847, -750518746); let r = i64x2::new(4040594005688324125, -5795079921582298726); - assert_eq!(r, transmute(lsx_vavgr_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vavgr_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1718,7 +2181,13 @@ unsafe fn test_lsx_vavgr_d() { let b = 
i64x2::new(8758126674980055299, -7441643514470614533); let r = i64x2::new(3399991646978312393, -1904131665097658207); - assert_eq!(r, transmute(lsx_vavgr_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vavgr_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1731,7 +2200,13 @@ unsafe fn test_lsx_vavgr_bu() { ); let r = i64x2::new(9122444831751176042, 6010164553039771699); - assert_eq!(r, transmute(lsx_vavgr_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vavgr_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1740,7 +2215,13 @@ unsafe fn test_lsx_vavgr_hu() { let b = u16x8::new(26111, 34713, 61420, 23702, 29204, 9543, 62786, 7043); let r = i64x2::new(7022187818705851223, 4754859411904311722); - assert_eq!(r, transmute(lsx_vavgr_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vavgr_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1749,7 +2230,13 @@ unsafe fn test_lsx_vavgr_wu() { let b = u32x4::new(1930150361, 3668628165, 2983921396, 2410913126); let r = i64x2::new(-5401180487351753235, 8140240017388800980); - assert_eq!(r, transmute(lsx_vavgr_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vavgr_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1758,7 +2245,13 @@ unsafe fn test_lsx_vavgr_du() { let b = u64x2::new(8650759135311802962, 11380630663742852932); let r = i64x2::new(6046550632940509412, 8095423581736830430); - assert_eq!(r, transmute(lsx_vavgr_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vavgr_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1771,7 +2264,13 @@ unsafe fn test_lsx_vssub_b() { ); let r = i64x2::new(628822736562549631, -9187601072510296593); - assert_eq!(r, 
transmute(lsx_vssub_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssub_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1780,7 +2279,13 @@ unsafe fn test_lsx_vssub_h() { let b = i16x8::new(-26027, 6118, -13204, 25080, 12458, 8441, 24701, 11617); let r = i64x2::new(-9223231300041015297, 1942699741282756937); - assert_eq!(r, transmute(lsx_vssub_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssub_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1789,7 +2294,13 @@ unsafe fn test_lsx_vssub_w() { let b = i32x4::new(-1808829767, 2144666490, 146236682, 1180114488); let r = i64x2::new(-9223372035405031217, -177933965588659662); - assert_eq!(r, transmute(lsx_vssub_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssub_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1798,7 +2309,13 @@ unsafe fn test_lsx_vssub_d() { let b = i64x2::new(-2293337525465880409, 5736255249834646932); let r = i64x2::new(2921430482628531027, -4208815595153969049); - assert_eq!(r, transmute(lsx_vssub_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssub_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1811,7 +2328,13 @@ unsafe fn test_lsx_vssub_bu() { ); let r = i64x2::new(1441151919413273782, 87960930222283); - assert_eq!(r, transmute(lsx_vssub_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssub_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1820,7 +2343,13 @@ unsafe fn test_lsx_vssub_hu() { let b = u16x8::new(50468, 33060, 15257, 59071, 59343, 21993, 42978, 20097); let r = i64x2::new(902801202201243247, -7922957643493867520); - assert_eq!(r, transmute(lsx_vssub_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + 
transmute(lsx_vssub_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1829,7 +2358,13 @@ unsafe fn test_lsx_vssub_wu() { let b = u32x4::new(31483972, 3489479082, 152079374, 1875131600); let r = i64x2::new(66202020638834260, 1378022115978010238); - assert_eq!(r, transmute(lsx_vssub_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssub_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1838,7 +2373,13 @@ unsafe fn test_lsx_vssub_du() { let b = u64x2::new(6460869225596371206, 16765308520486969885); let r = i64x2::new(8426906920692365065, 0); - assert_eq!(r, transmute(lsx_vssub_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssub_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1851,7 +2392,13 @@ unsafe fn test_lsx_vabsd_b() { ); let r = i64x2::new(4230359294854509733, 2116586434120326452); - assert_eq!(r, transmute(lsx_vabsd_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vabsd_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1860,7 +2407,13 @@ unsafe fn test_lsx_vabsd_h() { let b = i16x8::new(9346, 27961, 21592, 10762, -6831, 17219, 14968, -1750); let r = i64x2::new(4018377481144584593, 2994052849949411737); - assert_eq!(r, transmute(lsx_vabsd_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vabsd_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1869,7 +2422,13 @@ unsafe fn test_lsx_vabsd_w() { let b = i32x4::new(-638463360, -1154268425, 818053243, -1766966029); let r = i64x2::new(4346218292750542585, 1613133471209364690); - assert_eq!(r, transmute(lsx_vabsd_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vabsd_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ 
-1878,7 +2437,13 @@ unsafe fn test_lsx_vabsd_d() { let b = i64x2::new(-8533946706796471089, 1165272962517390961); let r = i64x2::new(7188249046367538699, 8146605509049538382); - assert_eq!(r, transmute(lsx_vabsd_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vabsd_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1891,7 +2456,13 @@ unsafe fn test_lsx_vabsd_bu() { ); let r = i64x2::new(2316568964225934796, 5350198762417854927); - assert_eq!(r, transmute(lsx_vabsd_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vabsd_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1900,7 +2471,13 @@ unsafe fn test_lsx_vabsd_hu() { let b = u16x8::new(42102, 40052, 6807, 16289, 29686, 38061, 42843, 26642); let r = i64x2::new(-6889746235852116468, 1175584127230950722); - assert_eq!(r, transmute(lsx_vabsd_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vabsd_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1909,7 +2486,13 @@ unsafe fn test_lsx_vabsd_wu() { let b = u32x4::new(3008439409, 976530727, 1726048801, 4235308512); let r = i64x2::new(-5056055741505581388, 103751774096297765); - assert_eq!(r, transmute(lsx_vabsd_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vabsd_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1918,7 +2501,13 @@ unsafe fn test_lsx_vabsd_du() { let b = u64x2::new(305704565845198935, 18327726360649467511); let r = i64x2::new(-4540227154002526968, -1590034053554043722); - assert_eq!(r, transmute(lsx_vabsd_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vabsd_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -1931,7 +2520,10 @@ unsafe fn test_lsx_vmul_b() { ); let r = i64x2::new(-836412611799730432, 
-7959044669412588992); - assert_eq!(r, transmute(lsx_vmul_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmul_b(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1940,7 +2532,10 @@ unsafe fn test_lsx_vmul_h() { let b = i16x8::new(-18582, -25667, 17674, 8424, -17121, -21798, 28934, -353); let r = i64x2::new(-7419436171490628650, 3947512047518358605); - assert_eq!(r, transmute(lsx_vmul_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmul_h(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1949,7 +2544,10 @@ unsafe fn test_lsx_vmul_w() { let b = i32x4::new(1754730718, 782084571, 894216679, -1895747372); let r = i64x2::new(6602438528086061106, 4680306660704041039); - assert_eq!(r, transmute(lsx_vmul_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmul_w(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1958,7 +2556,10 @@ unsafe fn test_lsx_vmul_d() { let b = i64x2::new(8096709215426138432, -5454415917204378153); let r = i64x2::new(-1062747544199352000, -649255846668983579); - assert_eq!(r, transmute(lsx_vmul_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmul_d(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -1976,7 +2577,11 @@ unsafe fn test_lsx_vmadd_b() { assert_eq!( r, - transmute(lsx_vmadd_b(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmadd_b( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -1989,7 +2594,11 @@ unsafe fn test_lsx_vmadd_h() { assert_eq!( r, - transmute(lsx_vmadd_h(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmadd_h( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -2002,7 +2611,11 @@ unsafe fn test_lsx_vmadd_w() { assert_eq!( r, - transmute(lsx_vmadd_w(transmute(a), transmute(b), 
transmute(c))) + transmute(lsx_vmadd_w( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -2015,7 +2628,11 @@ unsafe fn test_lsx_vmadd_d() { assert_eq!( r, - transmute(lsx_vmadd_d(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmadd_d( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -2034,7 +2651,11 @@ unsafe fn test_lsx_vmsub_b() { assert_eq!( r, - transmute(lsx_vmsub_b(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmsub_b( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -2047,7 +2668,11 @@ unsafe fn test_lsx_vmsub_h() { assert_eq!( r, - transmute(lsx_vmsub_h(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmsub_h( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -2060,7 +2685,11 @@ unsafe fn test_lsx_vmsub_w() { assert_eq!( r, - transmute(lsx_vmsub_w(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmsub_w( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -2073,7 +2702,11 @@ unsafe fn test_lsx_vmsub_d() { assert_eq!( r, - transmute(lsx_vmsub_d(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmsub_d( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -2087,7 +2720,10 @@ unsafe fn test_lsx_vdiv_b() { ); let r = i64x2::new(720575944674246657, 281475060530176); - assert_eq!(r, transmute(lsx_vdiv_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vdiv_b(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -2096,7 +2732,10 @@ unsafe fn test_lsx_vdiv_h() { let b = i16x8::new(-11221, 24673, 19931, 3799, -3251, -21373, -13758, -31286); let r = i64x2::new(-1125904201744385, 281470681743353); - assert_eq!(r, transmute(lsx_vdiv_h(transmute(a), transmute(b)))); + assert_eq!( + r, + 
transmute(lsx_vdiv_h(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -2105,7 +2744,10 @@ unsafe fn test_lsx_vdiv_w() { let b = i32x4::new(-775731190, 1887886939, 1001718213, 1135075421); let r = i64x2::new(4294967295, 4294967297); - assert_eq!(r, transmute(lsx_vdiv_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vdiv_w(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -2114,7 +2756,10 @@ unsafe fn test_lsx_vdiv_d() { let b = i64x2::new(-9175012156877545557, -6390704898809702209); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vdiv_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vdiv_d(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -2127,7 +2772,13 @@ unsafe fn test_lsx_vdiv_bu() { ); let r = i64x2::new(261, 72058702139687425); - assert_eq!(r, transmute(lsx_vdiv_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vdiv_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2136,7 +2787,13 @@ unsafe fn test_lsx_vdiv_hu() { let b = u16x8::new(25282, 44917, 13706, 63351, 58837, 46710, 29092, 57823); let r = i64x2::new(4294967297, 0); - assert_eq!(r, transmute(lsx_vdiv_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vdiv_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2145,7 +2802,13 @@ unsafe fn test_lsx_vdiv_wu() { let b = u32x4::new(1130189258, 1211056894, 2357258312, 3855913706); let r = i64x2::new(1, 1); - assert_eq!(r, transmute(lsx_vdiv_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vdiv_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2154,7 +2817,13 @@ unsafe fn test_lsx_vdiv_du() { let b = u64x2::new(14945948123666054968, 10864054932328247404); let r = i64x2::new(0, 0); - 
assert_eq!(r, transmute(lsx_vdiv_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vdiv_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2167,7 +2836,13 @@ unsafe fn test_lsx_vhaddw_h_b() { ); let r = i64x2::new(13791943145684950, -562821104926904); - assert_eq!(r, transmute(lsx_vhaddw_h_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vhaddw_h_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2178,7 +2853,13 @@ unsafe fn test_lsx_vhaddw_w_h() { ); let r = i64x2::new(56307021213062, 183021441324639); - assert_eq!(r, transmute(lsx_vhaddw_w_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vhaddw_w_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2187,7 +2868,13 @@ unsafe fn test_lsx_vhaddw_d_w() { let b = i32x4::new(-1119468785, -1334232049, -1752131604, -2016112631); let r = i64x2::new(-2502031305, -1217615295); - assert_eq!(r, transmute(lsx_vhaddw_d_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vhaddw_d_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2200,7 +2887,13 @@ unsafe fn test_lsx_vhaddw_hu_bu() { ); let r = i64x2::new(45601115212087520, 21110838012870921); - assert_eq!(r, transmute(lsx_vhaddw_hu_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vhaddw_hu_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2209,7 +2902,13 @@ unsafe fn test_lsx_vhaddw_wu_hu() { let b = u16x8::new(40369, 53005, 64424, 35720, 9231, 19965, 20662, 8208); let r = i64x2::new(411432097222434, 312888367535410); - assert_eq!(r, transmute(lsx_vhaddw_wu_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vhaddw_wu_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ 
-2218,7 +2917,13 @@ unsafe fn test_lsx_vhaddw_du_wu() { let b = u32x4::new(728838120, 1267673009, 2659634151, 2264611356); let r = i64x2::new(4172122985, 4839922613); - assert_eq!(r, transmute(lsx_vhaddw_du_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vhaddw_du_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2231,7 +2936,13 @@ unsafe fn test_lsx_vhsubw_h_b() { ); let r = i64x2::new(-4503363402989617, -31243430355664844); - assert_eq!(r, transmute(lsx_vhsubw_h_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vhsubw_h_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2240,7 +2951,13 @@ unsafe fn test_lsx_vhsubw_w_h() { let b = i16x8::new(-14204, -13312, 8240, -4455, -6362, -4711, -30790, -15773); let r = i64x2::new(70059506530916, 60275571046613); - assert_eq!(r, transmute(lsx_vhsubw_w_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vhsubw_w_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2249,7 +2966,13 @@ unsafe fn test_lsx_vhsubw_d_w() { let b = i32x4::new(-1671723008, 870456702, 264823818, 13322401); let r = i64x2::new(-201438605, 449141316); - assert_eq!(r, transmute(lsx_vhsubw_d_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vhsubw_d_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2262,7 +2985,13 @@ unsafe fn test_lsx_vhsubw_hu_bu() { ); let r = i64x2::new(-62206416523952172, 42783380429340790); - assert_eq!(r, transmute(lsx_vhsubw_hu_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vhsubw_hu_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2271,7 +3000,13 @@ unsafe fn test_lsx_vhsubw_wu_hu() { let b = u16x8::new(5212, 32159, 36502, 59290, 7604, 229, 35511, 47443); let r = i64x2::new(24696062008394, 
-147484881944276); - assert_eq!(r, transmute(lsx_vhsubw_wu_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vhsubw_wu_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2280,7 +3015,13 @@ unsafe fn test_lsx_vhsubw_du_wu() { let b = u32x4::new(1383087137, 2403951939, 360532131, 3513614550); let r = i64x2::new(-601935499, 31776736); - assert_eq!(r, transmute(lsx_vhsubw_du_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vhsubw_du_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2293,7 +3034,10 @@ unsafe fn test_lsx_vmod_b() { ); let r = i64x2::new(2804691417388804007, -2461515231199824166); - assert_eq!(r, transmute(lsx_vmod_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmod_b(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -2302,7 +3046,10 @@ unsafe fn test_lsx_vmod_h() { let b = i16x8::new(1550, 9221, -12080, 14553, -24847, 28286, 1074, 192); let r = i64x2::new(3930282117007147005, -10982007906888970); - assert_eq!(r, transmute(lsx_vmod_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmod_h(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -2311,7 +3058,10 @@ unsafe fn test_lsx_vmod_w() { let b = i32x4::new(344507881, 1692387020, -1397506903, -1257953510); let r = i64x2::new(-5027973877095011085, 2553570821342119010); - assert_eq!(r, transmute(lsx_vmod_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmod_w(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -2320,7 +3070,10 @@ unsafe fn test_lsx_vmod_d() { let b = i64x2::new(4636642606889723746, -259899475747531088); let r = i64x2::new(-1381676014874400835, -257849503742906530); - assert_eq!(r, transmute(lsx_vmod_d(transmute(a), transmute(b)))); + assert_eq!( + r, + 
transmute(lsx_vmod_d(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -2333,7 +3086,13 @@ unsafe fn test_lsx_vmod_bu() { ); let r = i64x2::new(7287961163701724026, 4745974892933063220); - assert_eq!(r, transmute(lsx_vmod_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmod_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2342,7 +3101,13 @@ unsafe fn test_lsx_vmod_hu() { let b = u16x8::new(15317, 24954, 61354, 3720, 21471, 6193, 8193, 35745); let r = i64x2::new(315403234587388856, 7101062794264266609); - assert_eq!(r, transmute(lsx_vmod_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmod_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2351,7 +3116,13 @@ unsafe fn test_lsx_vmod_wu() { let b = u32x4::new(49228057, 2249712923, 358897384, 1782599598); let r = i64x2::new(1070413902953059662, 3340025749258890964); - assert_eq!(r, transmute(lsx_vmod_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmod_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2360,7 +3131,13 @@ unsafe fn test_lsx_vmod_du() { let b = u64x2::new(16850073055169051895, 16069565262862467484); let r = i64x2::new(7747010922784437137, 20234676239478699); - assert_eq!(r, transmute(lsx_vmod_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmod_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2370,7 +3147,7 @@ unsafe fn test_lsx_vreplve_b() { ); let r = i64x2::new(-2893606913523066921, -2893606913523066921); - assert_eq!(r, transmute(lsx_vreplve_b(transmute(a), -8))); + assert_eq!(r, transmute(lsx_vreplve_b(black_box(transmute(a)), -8))); } #[simd_test(enable = "lsx")] @@ -2378,7 +3155,7 @@ unsafe fn test_lsx_vreplve_h() { let a = i16x8::new(-29429, -23495, 8705, -7614, -25353, 
11887, -25989, -12818); let r = i64x2::new(-3607719825936298514, -3607719825936298514); - assert_eq!(r, transmute(lsx_vreplve_h(transmute(a), 7))); + assert_eq!(r, transmute(lsx_vreplve_h(black_box(transmute(a)), 7))); } #[simd_test(enable = "lsx")] @@ -2386,7 +3163,7 @@ unsafe fn test_lsx_vreplve_w() { let a = i32x4::new(1584940676, 95787593, -1655264847, 682404402); let r = i64x2::new(411404579393346121, 411404579393346121); - assert_eq!(r, transmute(lsx_vreplve_w(transmute(a), -3))); + assert_eq!(r, transmute(lsx_vreplve_w(black_box(transmute(a)), -3))); } #[simd_test(enable = "lsx")] @@ -2394,7 +3171,7 @@ unsafe fn test_lsx_vreplve_d() { let a = i64x2::new(7614424214598615675, -7096892795239148002); let r = i64x2::new(7614424214598615675, 7614424214598615675); - assert_eq!(r, transmute(lsx_vreplve_d(transmute(a), 0))); + assert_eq!(r, transmute(lsx_vreplve_d(black_box(transmute(a)), 0))); } #[simd_test(enable = "lsx")] @@ -2404,7 +3181,7 @@ unsafe fn test_lsx_vreplvei_b() { ); let r = i64x2::new(-2097865012304223518, -2097865012304223518); - assert_eq!(r, transmute(lsx_vreplvei_b::<5>(transmute(a)))); + assert_eq!(r, transmute(lsx_vreplvei_b::<5>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2412,7 +3189,7 @@ unsafe fn test_lsx_vreplvei_h() { let a = i16x8::new(-15455, -4410, 5029, 25863, -23170, 26570, 27423, -834); let r = i64x2::new(7719006069021698847, 7719006069021698847); - assert_eq!(r, transmute(lsx_vreplvei_h::<6>(transmute(a)))); + assert_eq!(r, transmute(lsx_vreplvei_h::<6>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2420,7 +3197,7 @@ unsafe fn test_lsx_vreplvei_w() { let a = i32x4::new(1843143434, 491125746, -328585251, -1996512058); let r = i64x2::new(7916240772710277898, 7916240772710277898); - assert_eq!(r, transmute(lsx_vreplvei_w::<0>(transmute(a)))); + assert_eq!(r, transmute(lsx_vreplvei_w::<0>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2428,7 +3205,7 @@ unsafe fn test_lsx_vreplvei_d() { 
let a = i64x2::new(4333963848299154309, -8310246545782080694); let r = i64x2::new(-8310246545782080694, -8310246545782080694); - assert_eq!(r, transmute(lsx_vreplvei_d::<1>(transmute(a)))); + assert_eq!(r, transmute(lsx_vreplvei_d::<1>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2441,7 +3218,13 @@ unsafe fn test_lsx_vpickev_b() { ); let r = i64x2::new(3921750152141124833, -933322373843017127); - assert_eq!(r, transmute(lsx_vpickev_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vpickev_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2450,7 +3233,13 @@ unsafe fn test_lsx_vpickev_h() { let b = i16x8::new(-5248, -1786, -21768, 23214, -4223, 23538, -24936, -32316); let r = i64x2::new(-7018596679058658432, 139073165196191894); - assert_eq!(r, transmute(lsx_vpickev_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vpickev_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2459,7 +3248,13 @@ unsafe fn test_lsx_vpickev_w() { let b = i32x4::new(-1187277846, -787064901, -980229113, 1746235326); let r = i64x2::new(-4210051979814398998, -769258006856513132); - assert_eq!(r, transmute(lsx_vpickev_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vpickev_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2468,7 +3263,13 @@ unsafe fn test_lsx_vpickev_d() { let b = i64x2::new(6574352346370076190, -3979792156310826694); let r = i64x2::new(6574352346370076190, 1789073368466131160); - assert_eq!(r, transmute(lsx_vpickev_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vpickev_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2481,7 +3282,13 @@ unsafe fn test_lsx_vpickod_b() { ); let r = i64x2::new(8220640377280882872, -6083110277645985532); - assert_eq!(r, 
transmute(lsx_vpickod_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vpickod_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2490,7 +3297,13 @@ unsafe fn test_lsx_vpickod_h() { let b = i16x8::new(12047, 25024, -10709, -28077, 24357, 19934, 10289, 28546); let r = i64x2::new(8035070303515402688, 6167254016163165900); - assert_eq!(r, transmute(lsx_vpickod_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vpickod_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2499,7 +3312,13 @@ unsafe fn test_lsx_vpickod_w() { let b = i32x4::new(-99240403, 314407358, 543396756, 1976776696); let r = i64x2::new(8490191261129341374, -7045044594236590438); - assert_eq!(r, transmute(lsx_vpickod_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vpickod_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2508,7 +3327,13 @@ unsafe fn test_lsx_vpickod_d() { let b = i64x2::new(-4197243771252175958, -543692393753629390); let r = i64x2::new(-543692393753629390, -7578696032343374601); - assert_eq!(r, transmute(lsx_vpickod_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vpickod_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2521,7 +3346,13 @@ unsafe fn test_lsx_vilvh_b() { ); let r = i64x2::new(1211180715666052671, -2634368371891034045); - assert_eq!(r, transmute(lsx_vilvh_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vilvh_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2530,7 +3361,13 @@ unsafe fn test_lsx_vilvh_h() { let b = i16x8::new(23768, -31845, 28689, 14757, 9499, 7795, -13573, -10011); let r = i64x2::new(-4714953853167983333, 4564918175499275003); - assert_eq!(r, transmute(lsx_vilvh_h(transmute(a), transmute(b)))); + assert_eq!( + r, 
+ transmute(lsx_vilvh_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2539,7 +3376,13 @@ unsafe fn test_lsx_vilvh_w() { let b = i32x4::new(-737076987, 38515006, 602108871, -63099569); let r = i64x2::new(-5365723764939852857, -1200522227779556017); - assert_eq!(r, transmute(lsx_vilvh_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vilvh_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2548,7 +3391,13 @@ unsafe fn test_lsx_vilvh_d() { let b = i64x2::new(-2160658667838026389, 1449429407527660400); let r = i64x2::new(1449429407527660400, 5375050218784453679); - assert_eq!(r, transmute(lsx_vilvh_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vilvh_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2561,7 +3410,13 @@ unsafe fn test_lsx_vilvl_b() { ); let r = i64x2::new(6945744258789947856, 8515979671552484861); - assert_eq!(r, transmute(lsx_vilvl_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vilvl_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2570,7 +3425,13 @@ unsafe fn test_lsx_vilvl_h() { let b = i16x8::new(11601, 6788, 3174, -4208, -25999, -25660, -4591, 7133); let r = i64x2::new(-6560589601043632815, -2260825085889541018); - assert_eq!(r, transmute(lsx_vilvl_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vilvl_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2579,7 +3440,13 @@ unsafe fn test_lsx_vilvl_w() { let b = i32x4::new(486029703, 1245981961, 112180197, 1939621508); let r = i64x2::new(-4282490222245561977, 7435326725564935433); - assert_eq!(r, transmute(lsx_vilvl_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vilvl_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable 
= "lsx")] @@ -2588,7 +3455,13 @@ unsafe fn test_lsx_vilvl_d() { let b = i64x2::new(3142531875873363679, 736682102982019415); let r = i64x2::new(3142531875873363679, 7063413230460842607); - assert_eq!(r, transmute(lsx_vilvl_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vilvl_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2601,7 +3474,13 @@ unsafe fn test_lsx_vpackev_b() { ); let r = i64x2::new(-1928363389519380677, -1882898104368665381); - assert_eq!(r, transmute(lsx_vpackev_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vpackev_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2610,7 +3489,13 @@ unsafe fn test_lsx_vpackev_h() { let b = i16x8::new(-9444, 5210, -14402, 17972, 16606, 2450, 5123, 14727); let r = i64x2::new(7533052947329899292, 1461440082551914718); - assert_eq!(r, transmute(lsx_vpackev_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vpackev_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2619,7 +3504,13 @@ unsafe fn test_lsx_vpackev_w() { let b = i32x4::new(-872903277, 1255047449, -2110158279, 682925573); let r = i64x2::new(5636997704425442707, -8345976908349339079); - assert_eq!(r, transmute(lsx_vpackev_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vpackev_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2628,7 +3519,13 @@ unsafe fn test_lsx_vpackev_d() { let b = i64x2::new(-9119315954224042738, -4563700463464702181); let r = i64x2::new(-9119315954224042738, 7118943335298607169); - assert_eq!(r, transmute(lsx_vpackev_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vpackev_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2641,7 +3538,13 @@ unsafe fn test_lsx_vpackod_b() { ); let r = 
i64x2::new(4389351353151377653, -4315624792288929032); - assert_eq!(r, transmute(lsx_vpackod_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vpackod_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2650,7 +3553,13 @@ unsafe fn test_lsx_vpackod_h() { let b = i16x8::new(-23247, 17928, -13353, -20146, 5696, 22071, -10728, -30262); let r = i64x2::new(-4433598883325590008, -9178747487946648009); - assert_eq!(r, transmute(lsx_vpackod_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vpackod_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2659,7 +3568,13 @@ unsafe fn test_lsx_vpackod_w() { let b = i32x4::new(445270781, 793617340, -1461557030, -22199234); let r = i64x2::new(51238874735551420, 6731566319615689790); - assert_eq!(r, transmute(lsx_vpackod_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vpackod_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2668,7 +3583,13 @@ unsafe fn test_lsx_vpackod_d() { let b = i64x2::new(9039771682296134623, -6404442538060227683); let r = i64x2::new(-6404442538060227683, -4670773907187480618); - assert_eq!(r, transmute(lsx_vpackod_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vpackod_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -2680,7 +3601,11 @@ unsafe fn test_lsx_vshuf_h() { assert_eq!( r, - transmute(lsx_vshuf_h(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vshuf_h( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -2693,7 +3618,11 @@ unsafe fn test_lsx_vshuf_w() { assert_eq!( r, - transmute(lsx_vshuf_w(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vshuf_w( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -2706,7 +3635,11 @@ unsafe fn 
test_lsx_vshuf_d() { assert_eq!( r, - transmute(lsx_vshuf_d(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vshuf_d( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -2720,7 +3653,10 @@ unsafe fn test_lsx_vand_v() { ); let r = i64x2::new(244105884219744360, -9223116804091473582); - assert_eq!(r, transmute(lsx_vand_v(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vand_v(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -2730,7 +3666,7 @@ unsafe fn test_lsx_vandi_b() { ); let r = i64x2::new(-8135737750142058361, -7666517314596397435); - assert_eq!(r, transmute(lsx_vandi_b::<159>(transmute(a)))); + assert_eq!(r, transmute(lsx_vandi_b::<159>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2743,7 +3679,10 @@ unsafe fn test_lsx_vor_v() { ); let r = i64x2::new(-2351582766212852737, -4924766118269159990); - assert_eq!(r, transmute(lsx_vor_v(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vor_v(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -2753,7 +3692,7 @@ unsafe fn test_lsx_vori_b() { ); let r = i64x2::new(-589140355308650538, -3179554720060804109); - assert_eq!(r, transmute(lsx_vori_b::<210>(transmute(a)))); + assert_eq!(r, transmute(lsx_vori_b::<210>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2766,7 +3705,10 @@ unsafe fn test_lsx_vnor_v() { ); let r = i64x2::new(3036560889408918025, 7823034030269427744); - assert_eq!(r, transmute(lsx_vnor_v(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vnor_v(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -2776,7 +3718,7 @@ unsafe fn test_lsx_vnori_b() { ); let r = i64x2::new(5227628601268782144, 596802560304890884); - assert_eq!(r, transmute(lsx_vnori_b::<51>(transmute(a)))); + assert_eq!(r, transmute(lsx_vnori_b::<51>(black_box(transmute(a))))); } 
#[simd_test(enable = "lsx")] @@ -2789,7 +3731,10 @@ unsafe fn test_lsx_vxor_v() { ); let r = i64x2::new(8732028225622312747, 6858262329367852470); - assert_eq!(r, transmute(lsx_vxor_v(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vxor_v(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -2799,7 +3744,7 @@ unsafe fn test_lsx_vxori_b() { ); let r = i64x2::new(3478586993001400570, 4687744515358339026); - assert_eq!(r, transmute(lsx_vxori_b::<225>(transmute(a)))); + assert_eq!(r, transmute(lsx_vxori_b::<225>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2817,7 +3762,11 @@ unsafe fn test_lsx_vbitsel_v() { assert_eq!( r, - transmute(lsx_vbitsel_v(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vbitsel_v( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -2833,7 +3782,10 @@ unsafe fn test_lsx_vbitseli_b() { assert_eq!( r, - transmute(lsx_vbitseli_b::<65>(transmute(a), transmute(b))) + transmute(lsx_vbitseli_b::<65>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -2844,7 +3796,7 @@ unsafe fn test_lsx_vshuf4i_b() { ); let r = i64x2::new(3937170420478429898, -3347145886530736916); - assert_eq!(r, transmute(lsx_vshuf4i_b::<234>(transmute(a)))); + assert_eq!(r, transmute(lsx_vshuf4i_b::<234>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2852,7 +3804,7 @@ unsafe fn test_lsx_vshuf4i_h() { let a = i16x8::new(27707, -1094, -15784, -28387, 31634, -12323, -30387, -11480); let r = i64x2::new(-7989953385787032646, -3231104182470389795); - assert_eq!(r, transmute(lsx_vshuf4i_h::<209>(transmute(a)))); + assert_eq!(r, transmute(lsx_vshuf4i_h::<209>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2860,35 +3812,38 @@ unsafe fn test_lsx_vshuf4i_w() { let a = i32x4::new(768986805, -1036149600, -1196682940, -214444511); let r = i64x2::new(3302773179299516085, -5139714087882845884); - assert_eq!(r, 
transmute(lsx_vshuf4i_w::<160>(transmute(a)))); + assert_eq!(r, transmute(lsx_vshuf4i_w::<160>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] unsafe fn test_lsx_vreplgr2vr_b() { let r = i64x2::new(795741901218843403, 795741901218843403); - assert_eq!(r, transmute(lsx_vreplgr2vr_b(970839819))); + assert_eq!(r, transmute(lsx_vreplgr2vr_b(black_box(970839819)))); } #[simd_test(enable = "lsx")] unsafe fn test_lsx_vreplgr2vr_h() { let r = i64x2::new(-6504141532176800324, -6504141532176800324); - assert_eq!(r, transmute(lsx_vreplgr2vr_h(93693372))); + assert_eq!(r, transmute(lsx_vreplgr2vr_h(black_box(93693372)))); } #[simd_test(enable = "lsx")] unsafe fn test_lsx_vreplgr2vr_w() { let r = i64x2::new(-6737078705572473188, -6737078705572473188); - assert_eq!(r, transmute(lsx_vreplgr2vr_w(-1568598372))); + assert_eq!(r, transmute(lsx_vreplgr2vr_w(black_box(-1568598372)))); } #[simd_test(enable = "lsx")] unsafe fn test_lsx_vreplgr2vr_d() { let r = i64x2::new(5000134708087557572, 5000134708087557572); - assert_eq!(r, transmute(lsx_vreplgr2vr_d(5000134708087557572))); + assert_eq!( + r, + transmute(lsx_vreplgr2vr_d(black_box(5000134708087557572))) + ); } #[simd_test(enable = "lsx")] @@ -2898,7 +3853,7 @@ unsafe fn test_lsx_vpcnt_b() { ); let r = i64x2::new(217867142450840068, 145528077781566722); - assert_eq!(r, transmute(lsx_vpcnt_b(transmute(a)))); + assert_eq!(r, transmute(lsx_vpcnt_b(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2906,7 +3861,7 @@ unsafe fn test_lsx_vpcnt_h() { let a = i16x8::new(-512, 10388, -21267, -27094, 1085, -26444, -29360, -11576); let r = i64x2::new(1970367786975239, 1970350607237126); - assert_eq!(r, transmute(lsx_vpcnt_h(transmute(a)))); + assert_eq!(r, transmute(lsx_vpcnt_h(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2914,7 +3869,7 @@ unsafe fn test_lsx_vpcnt_w() { let a = i32x4::new(1399276601, -2094725994, -100739325, -1239551533); let r = i64x2::new(47244640271, 81604378645); - assert_eq!(r, 
transmute(lsx_vpcnt_w(transmute(a)))); + assert_eq!(r, transmute(lsx_vpcnt_w(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2922,7 +3877,7 @@ unsafe fn test_lsx_vpcnt_d() { let a = i64x2::new(-4470823169399930539, 3184270543884128372); let r = i64x2::new(29, 25); - assert_eq!(r, transmute(lsx_vpcnt_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vpcnt_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2932,7 +3887,7 @@ unsafe fn test_lsx_vclo_b() { ); let r = i64x2::new(72057594071547904, 3311470116864); - assert_eq!(r, transmute(lsx_vclo_b(transmute(a)))); + assert_eq!(r, transmute(lsx_vclo_b(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2940,7 +3895,7 @@ unsafe fn test_lsx_vclo_h() { let a = i16x8::new(-5432, 27872, -9150, 27393, 25236, 1028, -21312, -25189); let r = i64x2::new(8589934595, 281479271677952); - assert_eq!(r, transmute(lsx_vclo_h(transmute(a)))); + assert_eq!(r, transmute(lsx_vclo_h(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2948,7 +3903,7 @@ unsafe fn test_lsx_vclo_w() { let a = i32x4::new(1214322611, -1755838761, -1222326743, -1511364419); let r = i64x2::new(4294967296, 4294967297); - assert_eq!(r, transmute(lsx_vclo_w(transmute(a)))); + assert_eq!(r, transmute(lsx_vclo_w(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2956,7 +3911,7 @@ unsafe fn test_lsx_vclo_d() { let a = i64x2::new(-249299854527467825, -459308653408461862); let r = i64x2::new(6, 5); - assert_eq!(r, transmute(lsx_vclo_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vclo_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2966,7 +3921,7 @@ unsafe fn test_lsx_vclz_b() { ); let r = i64x2::new(144116287587483648, 72903118479688195); - assert_eq!(r, transmute(lsx_vclz_b(transmute(a)))); + assert_eq!(r, transmute(lsx_vclz_b(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2974,7 +3929,7 @@ unsafe fn test_lsx_vclz_h() { let a = i16x8::new(1222, 32426, 3164, -10763, 10189, 
-4197, -21841, -28676); let r = i64x2::new(17179934725, 2); - assert_eq!(r, transmute(lsx_vclz_h(transmute(a)))); + assert_eq!(r, transmute(lsx_vclz_h(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2982,7 +3937,7 @@ unsafe fn test_lsx_vclz_w() { let a = i32x4::new(-490443689, -1039971379, -217310592, -1921086575); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vclz_w(transmute(a)))); + assert_eq!(r, transmute(lsx_vclz_w(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -2990,7 +3945,7 @@ unsafe fn test_lsx_vclz_d() { let a = i64x2::new(4630351532137644314, -6587611980764816064); let r = i64x2::new(1, 0); - assert_eq!(r, transmute(lsx_vclz_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vclz_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3000,7 +3955,10 @@ unsafe fn test_lsx_vpickve2gr_b() { ); let r: i32 = 51; - assert_eq!(r, transmute(lsx_vpickve2gr_b::<15>(transmute(a)))); + assert_eq!( + r, + transmute(lsx_vpickve2gr_b::<15>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lsx")] @@ -3008,7 +3966,7 @@ unsafe fn test_lsx_vpickve2gr_h() { let a = i16x8::new(-12924, 31013, 18171, 20404, 21226, 14128, -6255, 26521); let r: i32 = 21226; - assert_eq!(r, transmute(lsx_vpickve2gr_h::<4>(transmute(a)))); + assert_eq!(r, transmute(lsx_vpickve2gr_h::<4>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3016,7 +3974,7 @@ unsafe fn test_lsx_vpickve2gr_w() { let a = i32x4::new(-1559379275, 2065542381, -1882161334, 1502157419); let r: i32 = -1882161334; - assert_eq!(r, transmute(lsx_vpickve2gr_w::<2>(transmute(a)))); + assert_eq!(r, transmute(lsx_vpickve2gr_w::<2>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3024,7 +3982,7 @@ unsafe fn test_lsx_vpickve2gr_d() { let a = i64x2::new(-6941380853339482104, 8405634758774935528); let r: i64 = -6941380853339482104; - assert_eq!(r, transmute(lsx_vpickve2gr_d::<0>(transmute(a)))); + assert_eq!(r, 
transmute(lsx_vpickve2gr_d::<0>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3034,7 +3992,10 @@ unsafe fn test_lsx_vpickve2gr_bu() { ); let r: u32 = 199; - assert_eq!(r, transmute(lsx_vpickve2gr_bu::<8>(transmute(a)))); + assert_eq!( + r, + transmute(lsx_vpickve2gr_bu::<8>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lsx")] @@ -3042,7 +4003,10 @@ unsafe fn test_lsx_vpickve2gr_hu() { let a = i16x8::new(25003, 5139, -12977, 7550, -12177, 19294, -2216, 12693); let r: u32 = 25003; - assert_eq!(r, transmute(lsx_vpickve2gr_hu::<0>(transmute(a)))); + assert_eq!( + r, + transmute(lsx_vpickve2gr_hu::<0>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lsx")] @@ -3050,7 +4014,10 @@ unsafe fn test_lsx_vpickve2gr_wu() { let a = i32x4::new(-295894883, 551663550, -710853968, 82692774); let r: u32 = 3999072413; - assert_eq!(r, transmute(lsx_vpickve2gr_wu::<0>(transmute(a)))); + assert_eq!( + r, + transmute(lsx_vpickve2gr_wu::<0>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lsx")] @@ -3058,7 +4025,10 @@ unsafe fn test_lsx_vpickve2gr_du() { let a = i64x2::new(748282319555413922, -1352335765832355666); let r: u64 = 748282319555413922; - assert_eq!(r, transmute(lsx_vpickve2gr_du::<0>(transmute(a)))); + assert_eq!( + r, + transmute(lsx_vpickve2gr_du::<0>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lsx")] @@ -3070,7 +4040,7 @@ unsafe fn test_lsx_vinsgr2vr_b() { assert_eq!( r, - transmute(lsx_vinsgr2vr_b::<14>(transmute(a), 1333652061)) + transmute(lsx_vinsgr2vr_b::<14>(black_box(transmute(a)), 1333652061)) ); } @@ -3079,7 +4049,10 @@ unsafe fn test_lsx_vinsgr2vr_h() { let a = i16x8::new(-20591, 7819, 25287, -11296, 4604, 28833, -1306, 6418); let r = i64x2::new(-3179432729573085295, 1806782266980897276); - assert_eq!(r, transmute(lsx_vinsgr2vr_h::<5>(transmute(a), -987420193))); + assert_eq!( + r, + transmute(lsx_vinsgr2vr_h::<5>(black_box(transmute(a)), -987420193)) + ); } #[simd_test(enable = "lsx")] @@ -3087,7 +4060,10 @@ unsafe 
fn test_lsx_vinsgr2vr_w() { let a = i32x4::new(1608179655, 886830932, -621638499, 2021214690); let r = i64x2::new(3808909851629379527, 8681050995079237782); - assert_eq!(r, transmute(lsx_vinsgr2vr_w::<2>(transmute(a), -960507754))); + assert_eq!( + r, + transmute(lsx_vinsgr2vr_w::<2>(black_box(transmute(a)), -960507754)) + ); } #[simd_test(enable = "lsx")] @@ -3095,7 +4071,10 @@ unsafe fn test_lsx_vinsgr2vr_d() { let a = i64x2::new(-6562091001143116290, -2425423285843953307); let r = i64x2::new(-6562091001143116290, -233659266); - assert_eq!(r, transmute(lsx_vinsgr2vr_d::<1>(transmute(a), -233659266))); + assert_eq!( + r, + transmute(lsx_vinsgr2vr_d::<1>(black_box(transmute(a)), -233659266)) + ); } #[simd_test(enable = "lsx")] @@ -3104,7 +4083,13 @@ unsafe fn test_lsx_vfadd_s() { let b = u32x4::new(1050272808, 1054022924, 1064036136, 1063113730); let r = i64x2::new(4588396142719948771, 4567018621615066847); - assert_eq!(r, transmute(lsx_vfadd_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfadd_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3113,7 +4098,13 @@ unsafe fn test_lsx_vfadd_d() { let b = u64x2::new(4605819027271079334, 4601207158507578498); let r = i64x2::new(4608685566198055604, 4608371493448991663); - assert_eq!(r, transmute(lsx_vfadd_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfadd_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3122,7 +4113,13 @@ unsafe fn test_lsx_vfsub_s() { let b = u32x4::new(1063475462, 1045836432, 1065150677, 1042376676); let r = i64x2::new(4532926601401089072, 4475386505810184670); - assert_eq!(r, transmute(lsx_vfsub_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfsub_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3131,7 +4128,13 @@ unsafe fn test_lsx_vfsub_d() { let b = u64x2::new(4605973926398825814, 
4600156145303017004); let r = i64x2::new(-4622342180736116526, 4603750919602422881); - assert_eq!(r, transmute(lsx_vfsub_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfsub_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3140,7 +4143,13 @@ unsafe fn test_lsx_vfmul_s() { let b = u32x4::new(1065241951, 1044285812, 1050678216, 1009264512); let r = i64x2::new(4471727895898079441, 4289440988347233543); - assert_eq!(r, transmute(lsx_vfmul_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfmul_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3149,7 +4158,13 @@ unsafe fn test_lsx_vfmul_d() { let b = u64x2::new(4605208047666947899, 4599634375243914522); let r = i64x2::new(4591550625791030606, 4595475933048682142); - assert_eq!(r, transmute(lsx_vfmul_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfmul_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3158,7 +4173,13 @@ unsafe fn test_lsx_vfdiv_s() { let b = u32x4::new(1055538538, 1042248668, 1061233585, 1063649172); let r = i64x2::new(4613180427594946541, 4523223175100126088); - assert_eq!(r, transmute(lsx_vfdiv_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfdiv_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3167,7 +4188,13 @@ unsafe fn test_lsx_vfdiv_d() { let b = u64x2::new(4606326032528596062, 4601783079746725386); let r = i64x2::new(4592460108638699314, 4612120084672695832); - assert_eq!(r, transmute(lsx_vfdiv_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfdiv_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3176,7 +4203,13 @@ unsafe fn test_lsx_vfcvt_h_s() { let b = u32x4::new(1049501482, 1043939972, 1042291392, 1041250232); let r = 
i64x2::new(3495410141992989809, 3873441386606634666); - assert_eq!(r, transmute(lsx_vfcvt_h_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcvt_h_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3185,7 +4218,13 @@ unsafe fn test_lsx_vfcvt_s_d() { let b = u64x2::new(4600251021237488420, 4593890179408150924); let r = i64x2::new(4469319308295208818, 4496796258465732597); - assert_eq!(r, transmute(lsx_vfcvt_s_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcvt_s_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3194,7 +4233,13 @@ unsafe fn test_lsx_vfmin_s() { let b = u32x4::new(1060093085, 1026130528, 1057322097, 1057646773); let r = i64x2::new(4407197060203522560, 4542558301798153756); - assert_eq!(r, transmute(lsx_vfmin_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfmin_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3203,7 +4248,13 @@ unsafe fn test_lsx_vfmin_d() { let b = u64x2::new(4584808359801648672, 4602712060570539582); let r = i64x2::new(4584808359801648672, 4602712060570539582); - assert_eq!(r, transmute(lsx_vfmin_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfmin_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3212,7 +4263,13 @@ unsafe fn test_lsx_vfmina_s() { let b = u32x4::new(1049119234, 1058336224, 1057046116, 1029386720); let r = i64x2::new(4519411155382848002, 4421182298393539560); - assert_eq!(r, transmute(lsx_vfmina_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfmina_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3221,7 +4278,13 @@ unsafe fn test_lsx_vfmina_d() { let b = u64x2::new(4599088744110071826, 4598732503789588496); let r = i64x2::new(4599088744110071826, 
4598732503789588496); - assert_eq!(r, transmute(lsx_vfmina_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfmina_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3230,7 +4293,13 @@ unsafe fn test_lsx_vfmax_s() { let b = u32x4::new(1042175760, 1040826492, 1059132266, 1050815434); let r = i64x2::new(4557520760982391874, 4573984521684325226); - assert_eq!(r, transmute(lsx_vfmax_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfmax_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3239,7 +4308,13 @@ unsafe fn test_lsx_vfmax_d() { let b = u64x2::new(4593616624275112016, 4605244843740986156); let r = i64x2::new(4606275407710467505, 4605244843740986156); - assert_eq!(r, transmute(lsx_vfmax_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfmax_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3248,7 +4323,13 @@ unsafe fn test_lsx_vfmaxa_s() { let b = u32x4::new(1064739422, 1055122552, 1049654310, 1057411362); let r = i64x2::new(4531716855176798814, 4541547219258471462); - assert_eq!(r, transmute(lsx_vfmaxa_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfmaxa_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3257,7 +4338,13 @@ unsafe fn test_lsx_vfmaxa_d() { let b = u64x2::new(4603647289310579471, 4603999027307573908); let r = i64x2::new(4603647289310579471, 4606304546706191737); - assert_eq!(r, transmute(lsx_vfmaxa_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfmaxa_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3265,7 +4352,7 @@ unsafe fn test_lsx_vfclass_s() { let a = u32x4::new(1059786314, 1058231666, 1061513647, 1038650488); let r = i64x2::new(549755814016, 549755814016); - assert_eq!(r, 
transmute(lsx_vfclass_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vfclass_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3273,7 +4360,7 @@ unsafe fn test_lsx_vfclass_d() { let a = u64x2::new(4601724705608768104, 4601126152607382566); let r = i64x2::new(128, 128); - assert_eq!(r, transmute(lsx_vfclass_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vfclass_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3281,7 +4368,7 @@ unsafe fn test_lsx_vfsqrt_s() { let a = u32x4::new(1055398716, 1050305974, 995168768, 1064901995); let r = i64x2::new(4543169501430832482, 4574681629207255333); - assert_eq!(r, transmute(lsx_vfsqrt_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vfsqrt_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3289,7 +4376,7 @@ unsafe fn test_lsx_vfsqrt_d() { let a = u64x2::new(4605784293613801157, 4602267946351406890); let r = i64x2::new(4606453893731357485, 4604397310232711799); - assert_eq!(r, transmute(lsx_vfsqrt_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vfsqrt_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3297,7 +4384,7 @@ unsafe fn test_lsx_vfrecip_s() { let a = u32x4::new(1003452672, 1050811504, 1044295808, 1064402913); let r = i64x2::new(4632552602764963931, 4577820515916044016); - assert_eq!(r, transmute(lsx_vfrecip_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrecip_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3305,7 +4392,7 @@ unsafe fn test_lsx_vfrecip_d() { let a = u64x2::new(4598634931235673106, 4598630619264835010); let r = i64x2::new(4615355353482170689, 4615362460048142095); - assert_eq!(r, transmute(lsx_vfrecip_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrecip_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx,frecipe")] @@ -3313,7 +4400,7 @@ unsafe fn test_lsx_vfrecipe_s() { let a = u32x4::new(1057583779, 1062308847, 1060089100, 1048454688); let r = i64x2::new(4583644530211711115, 4647978179615164140); - assert_eq!(r, 
transmute(lsx_vfrecipe_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrecipe_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx,frecipe")] @@ -3321,7 +4408,7 @@ unsafe fn test_lsx_vfrecipe_d() { let a = u64x2::new(4605515926442181274, 4605369703273365674); let r = i64x2::new(4608204937770303488, 4608317161507651584); - assert_eq!(r, transmute(lsx_vfrecipe_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrecipe_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx,frecipe")] @@ -3329,7 +4416,7 @@ unsafe fn test_lsx_vfrsqrte_s() { let a = u32x4::new(1064377488, 1055815904, 1056897740, 1064016656); let r = i64x2::new(4592421282989204764, 4577184195020153336); - assert_eq!(r, transmute(lsx_vfrsqrte_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrsqrte_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx,frecipe")] @@ -3337,7 +4424,7 @@ unsafe fn test_lsx_vfrsqrte_d() { let a = u64x2::new(4602766865443628663, 4605323203937791867); let r = i64x2::new(4608986772678901760, 4607734355383549952); - assert_eq!(r, transmute(lsx_vfrsqrte_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrsqrte_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3345,7 +4432,7 @@ unsafe fn test_lsx_vfrint_s() { let a = u32x4::new(1062138521, 1056849108, 1034089720, 1038314384); let r = i64x2::new(1065353216, 0); - assert_eq!(r, transmute(lsx_vfrint_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrint_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3353,7 +4440,7 @@ unsafe fn test_lsx_vfrint_d() { let a = u64x2::new(4598620052333442366, 4603262362368837514); let r = i64x2::new(0, 4607182418800017408); - assert_eq!(r, transmute(lsx_vfrint_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrint_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3361,7 +4448,7 @@ unsafe fn test_lsx_vfrsqrt_s() { let a = u32x4::new(1058614029, 1050504950, 1013814976, 1062355001); let r = i64x2::new(4604601921912011494, 4579384257679777264); - 
assert_eq!(r, transmute(lsx_vfrsqrt_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrsqrt_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3369,7 +4456,7 @@ unsafe fn test_lsx_vfrsqrt_d() { let a = u64x2::new(4602924191185043139, 4606088351077917251); let r = i64x2::new(4608881149202581394, 4607483676176768181); - assert_eq!(r, transmute(lsx_vfrsqrt_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrsqrt_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3377,7 +4464,7 @@ unsafe fn test_lsx_vflogb_s() { let a = u32x4::new(1053488512, 1061429282, 1064965594, 1061326585); let r = i64x2::new(-4647714812225126400, -4647714812233515008); - assert_eq!(r, transmute(lsx_vflogb_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vflogb_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3385,7 +4472,7 @@ unsafe fn test_lsx_vflogb_d() { let a = u64x2::new(4589481276789128632, 4599408395082246526); let r = i64x2::new(-4607182418800017408, -4611686018427387904); - assert_eq!(r, transmute(lsx_vflogb_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vflogb_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3393,7 +4480,7 @@ unsafe fn test_lsx_vfcvth_s_h() { let a = i16x8::new(29550, -13884, 689, -1546, 24006, -19112, -12769, 1779); let r = i64x2::new(-4707668984349540352, 4097818267320836096); - assert_eq!(r, transmute(lsx_vfcvth_s_h(transmute(a)))); + assert_eq!(r, transmute(lsx_vfcvth_s_h(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3401,7 +4488,7 @@ unsafe fn test_lsx_vfcvth_d_s() { let a = u32x4::new(1051543000, 1042275304, 1038283216, 1063876621); let r = i64x2::new(4592649323212177408, 4606389677895712768); - assert_eq!(r, transmute(lsx_vfcvth_d_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vfcvth_d_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3409,7 +4496,7 @@ unsafe fn test_lsx_vfcvtl_s_h() { let a = i16x8::new(-21951, -13772, -17190, 9566, -19227, 9682, 13427, -30861); let 
r = i64x2::new(-4519784435355738112, 4371798972740354048); - assert_eq!(r, transmute(lsx_vfcvtl_s_h(transmute(a)))); + assert_eq!(r, transmute(lsx_vfcvtl_s_h(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3417,7 +4504,7 @@ unsafe fn test_lsx_vfcvtl_d_s() { let a = u32x4::new(1059809930, 1051084496, 1062618346, 1058273673); let r = i64x2::new(4604206389789720576, 4599521958080544768); - assert_eq!(r, transmute(lsx_vfcvtl_d_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vfcvtl_d_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3425,7 +4512,7 @@ unsafe fn test_lsx_vftint_w_s() { let a = u32x4::new(1064738153, 1040181800, 1064331056, 1050732566); let r = i64x2::new(1, 1); - assert_eq!(r, transmute(lsx_vftint_w_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vftint_w_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3433,7 +4520,7 @@ unsafe fn test_lsx_vftint_l_d() { let a = u64x2::new(4602244632405616462, 4606437548563176328); let r = i64x2::new(0, 1); - assert_eq!(r, transmute(lsx_vftint_l_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vftint_l_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3441,7 +4528,7 @@ unsafe fn test_lsx_vftint_wu_s() { let a = u32x4::new(1051598962, 1051261298, 1059326008, 1057784192); let r = i64x2::new(0, 4294967297); - assert_eq!(r, transmute(lsx_vftint_wu_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vftint_wu_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3449,7 +4536,7 @@ unsafe fn test_lsx_vftint_lu_d() { let a = u64x2::new(4605561240422589260, 4595241299507769712); let r = i64x2::new(1, 0); - assert_eq!(r, transmute(lsx_vftint_lu_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vftint_lu_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3457,7 +4544,7 @@ unsafe fn test_lsx_vftintrz_w_s() { let a = u32x4::new(1027659872, 1064207676, 1058472873, 1055740014); let r = i64x2::new(0, 0); - assert_eq!(r, 
transmute(lsx_vftintrz_w_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintrz_w_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3465,7 +4552,7 @@ unsafe fn test_lsx_vftintrz_l_d() { let a = u64x2::new(4605051539601556532, 4605129242354661923); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vftintrz_l_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintrz_l_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3473,7 +4560,7 @@ unsafe fn test_lsx_vftintrz_wu_s() { let a = u32x4::new(1060876751, 1053710034, 1057340881, 1055555596); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vftintrz_wu_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintrz_wu_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3481,7 +4568,7 @@ unsafe fn test_lsx_vftintrz_lu_d() { let a = u64x2::new(4598711097624940956, 4598268778109474002); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vftintrz_lu_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintrz_lu_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3489,7 +4576,7 @@ unsafe fn test_lsx_vffint_s_w() { let a = i32x4::new(81337967, 1396520141, 2124859806, 1655115736); let r = i64x2::new(5667351778062705614, 5676028806041521555); - assert_eq!(r, transmute(lsx_vffint_s_w(transmute(a)))); + assert_eq!(r, transmute(lsx_vffint_s_w(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3497,7 +4584,7 @@ unsafe fn test_lsx_vffint_d_l() { let a = i64x2::new(-1543454772280682525, -7672333112582708041); let r = i64x2::new(-4344448119835677720, -4333977527979901593); - assert_eq!(r, transmute(lsx_vffint_d_l(transmute(a)))); + assert_eq!(r, transmute(lsx_vffint_d_l(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3505,7 +4592,7 @@ unsafe fn test_lsx_vffint_s_wu() { let a = u32x4::new(2224947834, 194720725, 2248289069, 1131100007); let r = i64x2::new(5564675890493038082, 5658445755393114667); - assert_eq!(r, 
transmute(lsx_vffint_s_wu(transmute(a)))); + assert_eq!(r, transmute(lsx_vffint_s_wu(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3513,7 +4600,7 @@ unsafe fn test_lsx_vffint_d_lu() { let a = u64x2::new(11793247389644223387, 1356636411353166515); let r = i64x2::new(4892164017273962878, 4878194157796724979); - assert_eq!(r, transmute(lsx_vffint_d_lu(transmute(a)))); + assert_eq!(r, transmute(lsx_vffint_d_lu(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3526,7 +4613,13 @@ unsafe fn test_lsx_vandn_v() { ); let r = i64x2::new(184648152262214664, 2315143230533931624); - assert_eq!(r, transmute(lsx_vandn_v(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vandn_v( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3536,7 +4629,7 @@ unsafe fn test_lsx_vneg_b() { ); let r = i64x2::new(-6195839201974406282, 3566844512212398771); - assert_eq!(r, transmute(lsx_vneg_b(transmute(a)))); + assert_eq!(r, transmute(lsx_vneg_b(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3544,7 +4637,7 @@ unsafe fn test_lsx_vneg_h() { let a = i16x8::new(-6540, 25893, -2534, 29805, -28719, -16331, -20168, 14650); let r = i64x2::new(-8389350794815923828, -4123521786840387537); - assert_eq!(r, transmute(lsx_vneg_h(transmute(a)))); + assert_eq!(r, transmute(lsx_vneg_h(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3552,7 +4645,7 @@ unsafe fn test_lsx_vneg_w() { let a = i32x4::new(-927815384, -898911982, 716171852, -2025175544); let r = i64x2::new(3860797565600356056, 8698062733717804468); - assert_eq!(r, transmute(lsx_vneg_w(transmute(a)))); + assert_eq!(r, transmute(lsx_vneg_w(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3560,7 +4653,7 @@ unsafe fn test_lsx_vneg_d() { let a = i64x2::new(4241851098775470984, 2487122929432859927); let r = i64x2::new(-4241851098775470984, -2487122929432859927); - assert_eq!(r, transmute(lsx_vneg_d(transmute(a)))); + 
assert_eq!(r, transmute(lsx_vneg_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3573,7 +4666,10 @@ unsafe fn test_lsx_vmuh_b() { ); let r = i64x2::new(931993372669836524, 2017024359980467698); - assert_eq!(r, transmute(lsx_vmuh_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmuh_b(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -3582,7 +4678,10 @@ unsafe fn test_lsx_vmuh_h() { let b = i16x8::new(-446, -16863, 19467, -13578, -9673, -26572, -7864, 9855); let r = i64x2::new(-1422322400225984462, -842721997477184351); - assert_eq!(r, transmute(lsx_vmuh_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmuh_h(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -3591,7 +4690,10 @@ unsafe fn test_lsx_vmuh_w() { let b = i32x4::new(-1684820454, 449222301, 1106076122, 431017950); let r = i64x2::new(-950505610786872114, 420439596918869732); - assert_eq!(r, transmute(lsx_vmuh_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmuh_w(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -3600,7 +4702,10 @@ unsafe fn test_lsx_vmuh_d() { let b = i64x2::new(-1208434038665242614, -6078343251861677818); let r = i64x2::new(-121343209662433286, 284995587689374477); - assert_eq!(r, transmute(lsx_vmuh_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmuh_d(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -3613,7 +4718,13 @@ unsafe fn test_lsx_vmuh_bu() { ); let r = i64x2::new(8725461799780227590, -3369022092985820632); - assert_eq!(r, transmute(lsx_vmuh_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmuh_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3622,7 +4733,13 @@ unsafe fn test_lsx_vmuh_hu() { let b = u16x8::new(14769, 6489, 58866, 5997, 46648, 26325, 42186, 
26942); let r = i64x2::new(1572068217944938757, 4366267597274655896); - assert_eq!(r, transmute(lsx_vmuh_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmuh_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3631,7 +4748,13 @@ unsafe fn test_lsx_vmuh_wu() { let b = u32x4::new(1981234883, 1290836259, 1284878577, 702668871); let r = i64x2::new(4011887256539048298, 960560772888018584); - assert_eq!(r, transmute(lsx_vmuh_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmuh_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3640,7 +4763,13 @@ unsafe fn test_lsx_vmuh_du() { let b = u64x2::new(14805542397189366587, 10025341254588295994); let r = i64x2::new(-9132083796568587258, 2493261783600858707); - assert_eq!(r, transmute(lsx_vmuh_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmuh_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3650,7 +4779,7 @@ unsafe fn test_lsx_vsllwil_h_b() { ); let r = i64x2::new(-990777899147527584, 126109727303143360); - assert_eq!(r, transmute(lsx_vsllwil_h_b::<5>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsllwil_h_b::<5>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3658,7 +4787,7 @@ unsafe fn test_lsx_vsllwil_w_h() { let a = i16x8::new(25135, -4241, 25399, -32451, 5597, -16847, 3192, -14694); let r = i64x2::new(-9326057613926912, -71360503652913664); - assert_eq!(r, transmute(lsx_vsllwil_w_h::<9>(transmute(a)))); + assert_eq!(r, transmute(lsx_vsllwil_w_h::<9>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3666,7 +4795,7 @@ unsafe fn test_lsx_vsllwil_d_w() { let a = i32x4::new(1472328927, -2106442262, 379100488, -607174188); let r = i64x2::new(6030659284992, -8627987505152); - assert_eq!(r, transmute(lsx_vsllwil_d_w::<12>(transmute(a)))); + assert_eq!(r, 
transmute(lsx_vsllwil_d_w::<12>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -3676,7 +4805,10 @@ unsafe fn test_lsx_vsllwil_hu_bu() { ); let r = i64x2::new(6953679870551405312, 6809531147446388736); - assert_eq!(r, transmute(lsx_vsllwil_hu_bu::<7>(transmute(a)))); + assert_eq!( + r, + transmute(lsx_vsllwil_hu_bu::<7>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lsx")] @@ -3684,7 +4816,10 @@ unsafe fn test_lsx_vsllwil_wu_hu() { let a = u16x8::new(370, 47410, 29611, 6206, 10390, 34658, 65264, 5264); let r = i64x2::new(52127846272954880, 6823569169558272); - assert_eq!(r, transmute(lsx_vsllwil_wu_hu::<8>(transmute(a)))); + assert_eq!( + r, + transmute(lsx_vsllwil_wu_hu::<8>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lsx")] @@ -3692,7 +4827,10 @@ unsafe fn test_lsx_vsllwil_du_wu() { let a = u32x4::new(3249798491, 4098547305, 1101510259, 3478509641); let r = i64x2::new(13630642809995264, 17190553355550720); - assert_eq!(r, transmute(lsx_vsllwil_du_wu::<22>(transmute(a)))); + assert_eq!( + r, + transmute(lsx_vsllwil_du_wu::<22>(black_box(transmute(a)))) + ); } #[simd_test(enable = "lsx")] @@ -3701,7 +4839,13 @@ unsafe fn test_lsx_vsran_b_h() { let b = i16x8::new(-12507, -16997, -17826, 5682, -298, -28572, -8117, -13478); let r = i64x2::new(-864943573596831881, 0); - assert_eq!(r, transmute(lsx_vsran_b_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsran_b_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3710,7 +4854,13 @@ unsafe fn test_lsx_vsran_h_w() { let b = i32x4::new(-52337348, -677553123, -58200260, -1473338606); let r = i64x2::new(1267763303694925820, 0); - assert_eq!(r, transmute(lsx_vsran_h_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsran_h_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3719,7 +4869,13 @@ unsafe fn test_lsx_vsran_w_d() { let b = 
i64x2::new(-8585295495893484131, -2657141976436452013); let r = i64x2::new(-5882350952887806270, 0); - assert_eq!(r, transmute(lsx_vsran_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsran_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3728,7 +4884,13 @@ unsafe fn test_lsx_vssran_b_h() { let b = i16x8::new(9459, 15241, 22170, 28027, 5348, 14784, 22613, -9469); let r = i64x2::new(9187483431610086528, 0); - assert_eq!(r, transmute(lsx_vssran_b_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssran_b_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3737,7 +4899,13 @@ unsafe fn test_lsx_vssran_h_w() { let b = i32x4::new(2070726003, -944816867, -160621862, -1222036466); let r = i64x2::new(-5219109151313101350, 0); - assert_eq!(r, transmute(lsx_vssran_h_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssran_h_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3746,7 +4914,13 @@ unsafe fn test_lsx_vssran_w_d() { let b = i64x2::new(-7078666005882550400, -2564990402652718339); let r = i64x2::new(-15032385536, 0); - assert_eq!(r, transmute(lsx_vssran_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssran_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3755,7 +4929,13 @@ unsafe fn test_lsx_vssran_bu_h() { let b = u16x8::new(2372, 26267, 4722, 47876, 44857, 55242, 45998, 51450); let r = i64x2::new(47227865344, 0); - assert_eq!(r, transmute(lsx_vssran_bu_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssran_bu_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3764,7 +4944,13 @@ unsafe fn test_lsx_vssran_hu_w() { let b = u32x4::new(2085279153, 2679576985, 2935643238, 3797496208); let r = i64x2::new(281470684234479, 
0); - assert_eq!(r, transmute(lsx_vssran_hu_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssran_hu_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3773,7 +4959,13 @@ unsafe fn test_lsx_vssran_wu_d() { let b = u64x2::new(3904652404244024971, 4230656884168675704); let r = i64x2::new(536870912000, 0); - assert_eq!(r, transmute(lsx_vssran_wu_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssran_wu_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3782,7 +4974,13 @@ unsafe fn test_lsx_vsrarn_b_h() { let b = i16x8::new(-19071, -903, 11542, -25909, 24111, 14882, -27192, -8283); let r = i64x2::new(7076043428318610384, 0); - assert_eq!(r, transmute(lsx_vsrarn_b_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrarn_b_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3791,7 +4989,13 @@ unsafe fn test_lsx_vsrarn_h_w() { let b = i32x4::new(-1571698573, 1467958613, -1857488008, 424713310); let r = i64x2::new(498163119212, 0); - assert_eq!(r, transmute(lsx_vsrarn_h_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrarn_h_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3800,7 +5004,13 @@ unsafe fn test_lsx_vsrarn_w_d() { let b = i64x2::new(-8645668865455529235, -3129277582817496880); let r = i64x2::new(-8628090759335017621, 0); - assert_eq!(r, transmute(lsx_vsrarn_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrarn_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3809,7 +5019,13 @@ unsafe fn test_lsx_vssrarn_b_h() { let b = i16x8::new(24298, 2343, 24641, 20910, 3142, -1171, 25850, 15932); let r = i64x2::new(-148338468081139694, 0); - assert_eq!(r, transmute(lsx_vssrarn_b_h(transmute(a), transmute(b)))); + 
assert_eq!( + r, + transmute(lsx_vssrarn_b_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3818,7 +5034,13 @@ unsafe fn test_lsx_vssrarn_h_w() { let b = i32x4::new(1911424854, -931292983, -1710824608, -1179580317); let r = i64x2::new(-9223231301513904204, 0); - assert_eq!(r, transmute(lsx_vssrarn_h_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssrarn_h_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3827,7 +5049,13 @@ unsafe fn test_lsx_vssrarn_w_d() { let b = i64x2::new(2843689038926761304, -6830262024912907383); let r = i64x2::new(-9223372034707292161, 0); - assert_eq!(r, transmute(lsx_vssrarn_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssrarn_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3836,7 +5064,13 @@ unsafe fn test_lsx_vssrarn_bu_h() { let b = u16x8::new(60210, 40155, 14296, 25577, 1550, 1674, 5330, 10645); let r = i64x2::new(10999415373897, 0); - assert_eq!(r, transmute(lsx_vssrarn_bu_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssrarn_bu_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3845,7 +5079,13 @@ unsafe fn test_lsx_vssrarn_hu_w() { let b = u32x4::new(3570029841, 3229468238, 1070101998, 3159433736); let r = i64x2::new(281474976645120, 0); - assert_eq!(r, transmute(lsx_vssrarn_hu_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssrarn_hu_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3854,7 +5094,13 @@ unsafe fn test_lsx_vssrarn_wu_d() { let b = u64x2::new(1112771813772164907, 646071836375127186); let r = i64x2::new(963446, 0); - assert_eq!(r, transmute(lsx_vssrarn_wu_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssrarn_wu_d( + black_box(transmute(a)), + 
black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3863,7 +5109,13 @@ unsafe fn test_lsx_vsrln_b_h() { let b = i16x8::new(-11667, 13077, -23656, 5150, -23771, -31329, 20729, 15169); let r = i64x2::new(23363148983015937, 0); - assert_eq!(r, transmute(lsx_vsrln_b_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrln_b_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3872,7 +5124,13 @@ unsafe fn test_lsx_vsrln_h_w() { let b = i32x4::new(1775989751, -1602688801, -801213995, -1801759515); let r = i64x2::new(-7033214568759295968, 0); - assert_eq!(r, transmute(lsx_vsrln_h_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrln_h_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3881,7 +5139,13 @@ unsafe fn test_lsx_vsrln_w_d() { let b = i64x2::new(-1428152872702150626, 3907864416256094744); let r = i64x2::new(-8718771486483115547, 0); - assert_eq!(r, transmute(lsx_vsrln_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrln_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3890,7 +5154,13 @@ unsafe fn test_lsx_vssrln_bu_h() { let b = u16x8::new(41072, 41125, 44619, 49581, 20733, 905, 47558, 7801); let r = i64x2::new(8862857593125412863, 0); - assert_eq!(r, transmute(lsx_vssrln_bu_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssrln_bu_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3899,7 +5169,13 @@ unsafe fn test_lsx_vssrln_hu_w() { let b = u32x4::new(1325069171, 1380839173, 3495604120, 2839043866); let r = i64x2::new(16889194387279379, 0); - assert_eq!(r, transmute(lsx_vssrln_hu_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssrln_hu_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3908,7 
+5184,13 @@ unsafe fn test_lsx_vssrln_wu_d() { let b = u64x2::new(3908262745817581251, 17131627096934512209); let r = i64x2::new(-1, 0); - assert_eq!(r, transmute(lsx_vssrln_wu_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssrln_wu_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3917,7 +5199,13 @@ unsafe fn test_lsx_vsrlrn_b_h() { let b = i16x8::new(22830, -27866, -24616, -9547, 11336, 320, 19908, 7056); let r = i64x2::new(-4888418841542521598, 0); - assert_eq!(r, transmute(lsx_vsrlrn_b_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrlrn_b_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3926,7 +5214,13 @@ unsafe fn test_lsx_vsrlrn_h_w() { let b = i32x4::new(1387862348, 119424523, 185407104, 1890720739); let r = i64x2::new(2222313691660711041, 0); - assert_eq!(r, transmute(lsx_vsrlrn_h_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrlrn_h_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3935,7 +5229,13 @@ unsafe fn test_lsx_vsrlrn_w_d() { let b = i64x2::new(-8550351213501194562, 7071641301481388656); let r = i64x2::new(182866822561795, 0); - assert_eq!(r, transmute(lsx_vsrlrn_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsrlrn_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3944,7 +5244,13 @@ unsafe fn test_lsx_vssrlrn_bu_h() { let b = u16x8::new(51122, 39148, 45511, 57479, 62603, 43668, 5537, 61004); let r = i64x2::new(432344477600776959, 0); - assert_eq!(r, transmute(lsx_vssrlrn_bu_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssrlrn_bu_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3953,7 +5259,13 @@ unsafe fn test_lsx_vssrlrn_hu_w() { let b = u32x4::new(1618795892, 3678356443, 
862445734, 2115250342); let r = i64x2::new(-4293983341, 0); - assert_eq!(r, transmute(lsx_vssrlrn_hu_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssrlrn_hu_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3962,7 +5274,13 @@ unsafe fn test_lsx_vssrlrn_wu_d() { let b = u64x2::new(13406765083608623828, 7214649593148131096); let r = i64x2::new(-1, 0); - assert_eq!(r, transmute(lsx_vssrlrn_wu_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssrlrn_wu_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -3977,7 +5295,10 @@ unsafe fn test_lsx_vfrstpi_b() { assert_eq!( r, - transmute(lsx_vfrstpi_b::<28>(transmute(a), transmute(b))) + transmute(lsx_vfrstpi_b::<28>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -3987,7 +5308,13 @@ unsafe fn test_lsx_vfrstpi_h() { let b = i16x8::new(9590, -8044, 15088, 4172, 1721, 27581, -19895, -25679); let r = i64x2::new(-4160352588467724069, 5959935604366651239); - assert_eq!(r, transmute(lsx_vfrstpi_h::<1>(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfrstpi_h::<1>( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4005,7 +5332,11 @@ unsafe fn test_lsx_vfrstp_b() { assert_eq!( r, - transmute(lsx_vfrstp_b(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vfrstp_b( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -4018,7 +5349,11 @@ unsafe fn test_lsx_vfrstp_h() { assert_eq!( r, - transmute(lsx_vfrstp_h(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vfrstp_h( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -4030,7 +5365,10 @@ unsafe fn test_lsx_vshuf4i_d() { assert_eq!( r, - transmute(lsx_vshuf4i_d::<153>(transmute(a), transmute(b))) + transmute(lsx_vshuf4i_d::<153>( + black_box(transmute(a)), + 
black_box(transmute(b)) + )) ); } @@ -4041,7 +5379,7 @@ unsafe fn test_lsx_vbsrl_v() { ); let r = i64x2::new(4570595419764160432, 56); - assert_eq!(r, transmute(lsx_vbsrl_v::<7>(transmute(a)))); + assert_eq!(r, transmute(lsx_vbsrl_v::<7>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4051,7 +5389,7 @@ unsafe fn test_lsx_vbsll_v() { ); let r = i64x2::new(0, -1801439850948198400); - assert_eq!(r, transmute(lsx_vbsll_v::<15>(transmute(a)))); + assert_eq!(r, transmute(lsx_vbsll_v::<15>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4066,7 +5404,10 @@ unsafe fn test_lsx_vextrins_b() { assert_eq!( r, - transmute(lsx_vextrins_b::<21>(transmute(a), transmute(b))) + transmute(lsx_vextrins_b::<21>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -4078,7 +5419,10 @@ unsafe fn test_lsx_vextrins_h() { assert_eq!( r, - transmute(lsx_vextrins_h::<33>(transmute(a), transmute(b))) + transmute(lsx_vextrins_h::<33>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -4090,7 +5434,10 @@ unsafe fn test_lsx_vextrins_w() { assert_eq!( r, - transmute(lsx_vextrins_w::<57>(transmute(a), transmute(b))) + transmute(lsx_vextrins_w::<57>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -4102,7 +5449,10 @@ unsafe fn test_lsx_vextrins_d() { assert_eq!( r, - transmute(lsx_vextrins_d::<62>(transmute(a), transmute(b))) + transmute(lsx_vextrins_d::<62>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -4113,7 +5463,7 @@ unsafe fn test_lsx_vmskltz_b() { ); let r = i64x2::new(40038, 0); - assert_eq!(r, transmute(lsx_vmskltz_b(transmute(a)))); + assert_eq!(r, transmute(lsx_vmskltz_b(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4121,7 +5471,7 @@ unsafe fn test_lsx_vmskltz_h() { let a = i16x8::new(16730, 29121, -23447, -8647, -22303, 21817, 30964, -27069); let r = i64x2::new(156, 0); - assert_eq!(r, transmute(lsx_vmskltz_h(transmute(a)))); + assert_eq!(r, 
transmute(lsx_vmskltz_h(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4129,7 +5479,7 @@ unsafe fn test_lsx_vmskltz_w() { let a = i32x4::new(-657282776, -1247210048, 162595942, 949871015); let r = i64x2::new(3, 0); - assert_eq!(r, transmute(lsx_vmskltz_w(transmute(a)))); + assert_eq!(r, transmute(lsx_vmskltz_w(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4137,7 +5487,7 @@ unsafe fn test_lsx_vmskltz_d() { let a = i64x2::new(7728638770319849738, 4250984610820351699); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vmskltz_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vmskltz_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4150,7 +5500,13 @@ unsafe fn test_lsx_vsigncov_b() { ); let r = i64x2::new(-9074694153930972472, 1986788453588057010); - assert_eq!(r, transmute(lsx_vsigncov_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsigncov_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4159,7 +5515,13 @@ unsafe fn test_lsx_vsigncov_h() { let b = i16x8::new(27367, 4727, -2962, 14937, 26207, -19075, -26630, 10708); let r = i64x2::new(-4204122973533661927, -3013866947575178847); - assert_eq!(r, transmute(lsx_vsigncov_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsigncov_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4168,7 +5530,13 @@ unsafe fn test_lsx_vsigncov_w() { let b = i32x4::new(-1719915889, 290419288, 202835952, -1715336967); let r = i64x2::new(-1247341342367689359, -7367316170792699888); - assert_eq!(r, transmute(lsx_vsigncov_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsigncov_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4177,7 +5545,13 @@ unsafe fn test_lsx_vsigncov_d() { let b = i64x2::new(-7146260093067324952, -4263419240070336957); let r = i64x2::new(-7146260093067324952, 
4263419240070336957); - assert_eq!(r, transmute(lsx_vsigncov_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsigncov_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4189,7 +5563,11 @@ unsafe fn test_lsx_vfmadd_s() { assert_eq!( r, - transmute(lsx_vfmadd_s(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vfmadd_s( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -4202,7 +5580,11 @@ unsafe fn test_lsx_vfmadd_d() { assert_eq!( r, - transmute(lsx_vfmadd_d(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vfmadd_d( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -4215,7 +5597,11 @@ unsafe fn test_lsx_vfmsub_s() { assert_eq!( r, - transmute(lsx_vfmsub_s(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vfmsub_s( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -4228,7 +5614,11 @@ unsafe fn test_lsx_vfmsub_d() { assert_eq!( r, - transmute(lsx_vfmsub_d(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vfmsub_d( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -4241,7 +5631,11 @@ unsafe fn test_lsx_vfnmadd_s() { assert_eq!( r, - transmute(lsx_vfnmadd_s(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vfnmadd_s( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -4254,7 +5648,11 @@ unsafe fn test_lsx_vfnmadd_d() { assert_eq!( r, - transmute(lsx_vfnmadd_d(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vfnmadd_d( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -4267,7 +5665,11 @@ unsafe fn test_lsx_vfnmsub_s() { assert_eq!( r, - transmute(lsx_vfnmsub_s(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vfnmsub_s( + black_box(transmute(a)), + black_box(transmute(b)), + 
black_box(transmute(c)) + )) ); } @@ -4280,7 +5682,11 @@ unsafe fn test_lsx_vfnmsub_d() { assert_eq!( r, - transmute(lsx_vfnmsub_d(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vfnmsub_d( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -4289,7 +5695,7 @@ unsafe fn test_lsx_vftintrne_w_s() { let a = u32x4::new(1031214064, 1059673230, 1042813024, 1053602874); let r = i64x2::new(4294967296, 0); - assert_eq!(r, transmute(lsx_vftintrne_w_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintrne_w_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4297,7 +5703,7 @@ unsafe fn test_lsx_vftintrne_l_d() { let a = u64x2::new(4606989588359571497, 4604713245380178790); let r = i64x2::new(1, 1); - assert_eq!(r, transmute(lsx_vftintrne_l_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintrne_l_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4305,7 +5711,7 @@ unsafe fn test_lsx_vftintrp_w_s() { let a = u32x4::new(1061716225, 1050491008, 1064711040, 1065018777); let r = i64x2::new(4294967297, 4294967297); - assert_eq!(r, transmute(lsx_vftintrp_w_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintrp_w_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4313,7 +5719,7 @@ unsafe fn test_lsx_vftintrp_l_d() { let a = u64x2::new(4587516915944025472, 4601504548481216392); let r = i64x2::new(1, 1); - assert_eq!(r, transmute(lsx_vftintrp_l_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintrp_l_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4321,7 +5727,7 @@ unsafe fn test_lsx_vftintrm_w_s() { let a = u32x4::new(1045772456, 1065200707, 1061587478, 1035467272); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vftintrm_w_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintrm_w_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4329,7 +5735,7 @@ unsafe fn test_lsx_vftintrm_l_d() { let a = u64x2::new(4597123259408216804, 
4594399417822716772); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vftintrm_l_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintrm_l_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4338,7 +5744,13 @@ unsafe fn test_lsx_vftint_w_d() { let b = u64x2::new(4606905060326467647, 4606985586417166381); let r = i64x2::new(4294967297, 0); - assert_eq!(r, transmute(lsx_vftint_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vftint_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4347,7 +5759,13 @@ unsafe fn test_lsx_vffint_s_l() { let b = i64x2::new(5814449889729512723, -111756032377486319); let r = i64x2::new(-2610252963668467161, 6669016150524087533); - assert_eq!(r, transmute(lsx_vffint_s_l(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vffint_s_l( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4356,7 +5774,13 @@ unsafe fn test_lsx_vftintrz_w_d() { let b = u64x2::new(4599106720144900270, 4600531579473237336); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vftintrz_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vftintrz_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4365,7 +5789,13 @@ unsafe fn test_lsx_vftintrp_w_d() { let b = u64x2::new(4606104970322966899, 4595679410565085836); let r = i64x2::new(4294967297, 4294967297); - assert_eq!(r, transmute(lsx_vftintrp_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vftintrp_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4374,7 +5804,13 @@ unsafe fn test_lsx_vftintrm_w_d() { let b = u64x2::new(4606733822200032543, 4589510164179968984); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vftintrm_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vftintrm_w_d( + 
black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4383,7 +5819,13 @@ unsafe fn test_lsx_vftintrne_w_d() { let b = u64x2::new(4599197176714081204, 4605745859931721980); let r = i64x2::new(4294967296, 0); - assert_eq!(r, transmute(lsx_vftintrne_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vftintrne_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4391,7 +5833,7 @@ unsafe fn test_lsx_vftintl_l_s() { let a = u32x4::new(1058856635, 1060563398, 1061422616, 1056124918); let r = i64x2::new(1, 1); - assert_eq!(r, transmute(lsx_vftintl_l_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintl_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4399,7 +5841,7 @@ unsafe fn test_lsx_vftinth_l_s() { let a = u32x4::new(1045383680, 1040752748, 1061879518, 1054801708); let r = i64x2::new(1, 0); - assert_eq!(r, transmute(lsx_vftinth_l_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vftinth_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4407,7 +5849,7 @@ unsafe fn test_lsx_vffinth_d_w() { let a = i32x4::new(517100418, -188510766, 949226647, -87467194); let r = i64x2::new(4741245898611228672, -4497729803343888384); - assert_eq!(r, transmute(lsx_vffinth_d_w(transmute(a)))); + assert_eq!(r, transmute(lsx_vffinth_d_w(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4415,7 +5857,7 @@ unsafe fn test_lsx_vffintl_d_w() { let a = i32x4::new(1273684401, -2137528906, -2109294912, -1646387998); let r = i64x2::new(4743129027571613696, -4476619782820462592); - assert_eq!(r, transmute(lsx_vffintl_d_w(transmute(a)))); + assert_eq!(r, transmute(lsx_vffintl_d_w(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4423,7 +5865,7 @@ unsafe fn test_lsx_vftintrzl_l_s() { let a = u32x4::new(1031186688, 987838976, 1034565688, 1061017371); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vftintrzl_l_s(transmute(a)))); 
+ assert_eq!(r, transmute(lsx_vftintrzl_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4431,7 +5873,7 @@ unsafe fn test_lsx_vftintrzh_l_s() { let a = u32x4::new(1049433828, 1048953580, 1060964637, 1059899586); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vftintrzh_l_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintrzh_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4439,7 +5881,7 @@ unsafe fn test_lsx_vftintrpl_l_s() { let a = u32x4::new(1061834803, 1064858941, 1060475110, 1063896216); let r = i64x2::new(1, 1); - assert_eq!(r, transmute(lsx_vftintrpl_l_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintrpl_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4447,7 +5889,7 @@ unsafe fn test_lsx_vftintrph_l_s() { let a = u32x4::new(1059691939, 1065187151, 1059017027, 1061117394); let r = i64x2::new(1, 1); - assert_eq!(r, transmute(lsx_vftintrph_l_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintrph_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4455,7 +5897,7 @@ unsafe fn test_lsx_vftintrml_l_s() { let a = u32x4::new(1062985651, 1065211455, 1056421466, 1057373572); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vftintrml_l_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintrml_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4463,7 +5905,7 @@ unsafe fn test_lsx_vftintrmh_l_s() { let a = u32x4::new(1050224290, 1063763666, 1057677270, 1063622234); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vftintrmh_l_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintrmh_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4471,7 +5913,7 @@ unsafe fn test_lsx_vftintrnel_l_s() { let a = u32x4::new(1060174609, 1050974638, 1047193308, 1062040876); let r = i64x2::new(1, 0); - assert_eq!(r, transmute(lsx_vftintrnel_l_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintrnel_l_s(black_box(transmute(a))))); } #[simd_test(enable = 
"lsx")] @@ -4479,7 +5921,7 @@ unsafe fn test_lsx_vftintrneh_l_s() { let a = u32x4::new(1055675382, 1036879184, 1064176794, 1063791852); let r = i64x2::new(1, 1); - assert_eq!(r, transmute(lsx_vftintrneh_l_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vftintrneh_l_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4487,7 +5929,7 @@ unsafe fn test_lsx_vfrintrne_s() { let a = u32x4::new(1054667842, 1061395025, 1062986478, 1062529334); let r = i64x2::new(4575657221408423936, 4575657222473777152); - assert_eq!(r, transmute(lsx_vfrintrne_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrintrne_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4495,7 +5937,7 @@ unsafe fn test_lsx_vfrintrne_d() { let a = u64x2::new(4603260356641870565, 4601614335120512898); let r = i64x2::new(4607182418800017408, 0); - assert_eq!(r, transmute(lsx_vfrintrne_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrintrne_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4503,7 +5945,7 @@ unsafe fn test_lsx_vfrintrz_s() { let a = u32x4::new(1063039577, 1033416832, 1052369306, 1057885024); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfrintrz_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrintrz_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4511,7 +5953,7 @@ unsafe fn test_lsx_vfrintrz_d() { let a = u64x2::new(4601515428088814484, 4604735152905786794); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfrintrz_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrintrz_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4519,7 +5961,7 @@ unsafe fn test_lsx_vfrintrp_s() { let a = u32x4::new(1061968959, 1056597596, 1064869916, 1058742360); let r = i64x2::new(4575657222473777152, 4575657222473777152); - assert_eq!(r, transmute(lsx_vfrintrp_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrintrp_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4527,7 +5969,7 @@ unsafe fn 
test_lsx_vfrintrp_d() { let a = u64x2::new(4603531792479663401, 4587997630530425392); let r = i64x2::new(4607182418800017408, 4607182418800017408); - assert_eq!(r, transmute(lsx_vfrintrp_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrintrp_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4535,7 +5977,7 @@ unsafe fn test_lsx_vfrintrm_s() { let a = u32x4::new(1058024441, 1044087184, 1059777964, 1050835426); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfrintrm_s(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrintrm_s(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4543,7 +5985,7 @@ unsafe fn test_lsx_vfrintrm_d() { let a = u64x2::new(4589388034824743512, 4606800774570289382); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfrintrm_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vfrintrm_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -4556,7 +5998,7 @@ unsafe fn test_lsx_vstelm_b() { ]; let r = i64x2::new(2624488095427530938, -2742340989646681128); - lsx_vstelm_b::<0, 0>(transmute(a), o.as_mut_ptr()); + lsx_vstelm_b::<0, 0>(black_box(transmute(a)), o.as_mut_ptr()); assert_eq!(r, transmute(o)); } @@ -4568,7 +6010,7 @@ unsafe fn test_lsx_vstelm_h() { ]; let r = i64x2::new(-5777879910580360821, -8010388107109560809); - lsx_vstelm_h::<0, 1>(transmute(a), o.as_mut_ptr()); + lsx_vstelm_h::<0, 1>(black_box(transmute(a)), o.as_mut_ptr()); assert_eq!(r, transmute(o)); } @@ -4580,7 +6022,7 @@ unsafe fn test_lsx_vstelm_w() { ]; let r = i64x2::new(-7107014201697162202, -4954294907532227136); - lsx_vstelm_w::<0, 3>(transmute(a), o.as_mut_ptr()); + lsx_vstelm_w::<0, 3>(black_box(transmute(a)), o.as_mut_ptr()); assert_eq!(r, transmute(o)); } @@ -4592,7 +6034,7 @@ unsafe fn test_lsx_vstelm_d() { ]; let r = i64x2::new(2628828971609511929, -1577551211298588582); - lsx_vstelm_d::<0, 0>(transmute(a), o.as_mut_ptr()); + lsx_vstelm_d::<0, 0>(black_box(transmute(a)), o.as_mut_ptr()); assert_eq!(r, transmute(o)); } 
@@ -4602,7 +6044,13 @@ unsafe fn test_lsx_vaddwev_d_w() { let b = i32x4::new(-2105551735, -1478351177, 1027048582, -607110700); let r = i64x2::new(-3995454036, 2115628395); - assert_eq!(r, transmute(lsx_vaddwev_d_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwev_d_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4611,7 +6059,13 @@ unsafe fn test_lsx_vaddwev_w_h() { let b = i16x8::new(-17479, -32614, 24343, 25426, -14077, -12419, 10115, 23013); let r = i64x2::new(57531086920254, -11304353922851); - assert_eq!(r, transmute(lsx_vaddwev_w_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwev_w_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4624,7 +6078,13 @@ unsafe fn test_lsx_vaddwev_h_b() { ); let r = i64x2::new(-6191796646052051, 32369798417022969); - assert_eq!(r, transmute(lsx_vaddwev_h_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwev_h_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4633,7 +6093,13 @@ unsafe fn test_lsx_vaddwod_d_w() { let b = i32x4::new(420515981, 473447119, 1471756335, 1044924117); let r = i64x2::new(126219465, 3020814787); - assert_eq!(r, transmute(lsx_vaddwod_d_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwod_d_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4642,7 +6108,13 @@ unsafe fn test_lsx_vaddwod_w_h() { let b = i16x8::new(-26581, -22301, 18214, -3616, -24489, 12150, -10765, -24232); let r = i64x2::new(-151719719748481, -112154480997307); - assert_eq!(r, transmute(lsx_vaddwod_w_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwod_w_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4655,7 +6127,13 @@ unsafe fn test_lsx_vaddwod_h_b() { ); let r = 
i64x2::new(-18014780768845678, 14636475441676413); - assert_eq!(r, transmute(lsx_vaddwod_h_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwod_h_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4664,7 +6142,13 @@ unsafe fn test_lsx_vaddwev_d_wu() { let b = u32x4::new(1482213353, 1001198416, 3345983326, 2244256337); let r = i64x2::new(4022160583, 4539965521); - assert_eq!(r, transmute(lsx_vaddwev_d_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwev_d_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4673,7 +6157,13 @@ unsafe fn test_lsx_vaddwev_w_hu() { let b = u16x8::new(28483, 24704, 9817, 62062, 47674, 8032, 29897, 62737); let r = i64x2::new(176725019407839, 226649719257774); - assert_eq!(r, transmute(lsx_vaddwev_w_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwev_w_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4686,7 +6176,13 @@ unsafe fn test_lsx_vaddwev_h_bu() { ); let r = i64x2::new(85006057160704351, 47850943627526421); - assert_eq!(r, transmute(lsx_vaddwev_h_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwev_h_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4695,7 +6191,13 @@ unsafe fn test_lsx_vaddwod_d_wu() { let b = u32x4::new(2782520439, 2496077290, 2678772394, 196273109); let r = i64x2::new(4147231270, 2289089430); - assert_eq!(r, transmute(lsx_vaddwod_d_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwod_d_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4704,7 +6206,13 @@ unsafe fn test_lsx_vaddwod_w_hu() { let b = u16x8::new(20353, 34039, 21222, 4948, 58293, 4766, 51360, 37497); let r = i64x2::new(82519206727777, 206875689791292); - assert_eq!(r, 
transmute(lsx_vaddwod_w_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwod_w_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4717,7 +6225,13 @@ unsafe fn test_lsx_vaddwod_h_bu() { ); let r = i64x2::new(73466429242409013, 32932877227196635); - assert_eq!(r, transmute(lsx_vaddwod_h_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwod_h_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4726,7 +6240,13 @@ unsafe fn test_lsx_vaddwev_d_wu_w() { let b = i32x4::new(-1308530150, 1427930358, 1723198474, 1987356336); let r = i64x2::new(2478528121, 3014708115); - assert_eq!(r, transmute(lsx_vaddwev_d_wu_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwev_d_wu_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4735,7 +6255,13 @@ unsafe fn test_lsx_vaddwev_w_hu_h() { let b = i16x8::new(-11621, -6593, 7431, -1189, -12361, -15174, 16182, -32434); let r = i64x2::new(64158221463769, 194716637325930); - assert_eq!(r, transmute(lsx_vaddwev_w_hu_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwev_w_hu_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4748,7 +6274,13 @@ unsafe fn test_lsx_vaddwev_h_bu_b() { ); let r = i64x2::new(71776235037065355, -7880749580746636); - assert_eq!(r, transmute(lsx_vaddwev_h_bu_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwev_h_bu_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4757,7 +6289,13 @@ unsafe fn test_lsx_vaddwod_d_wu_w() { let b = i32x4::new(-1646368557, 586112311, 376247963, 1048800083); let r = i64x2::new(3497092601, 3306080422); - assert_eq!(r, transmute(lsx_vaddwod_d_wu_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwod_d_wu_w( + 
black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4766,7 +6304,13 @@ unsafe fn test_lsx_vaddwod_w_hu_h() { let b = i16x8::new(31700, 22725, 14068, -14860, -28839, -14513, -1195, 27082); let r = i64x2::new(-10273561712908, 369560461022726); - assert_eq!(r, transmute(lsx_vaddwod_w_hu_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwod_w_hu_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4779,7 +6323,13 @@ unsafe fn test_lsx_vaddwod_h_bu_b() { ); let r = i64x2::new(49259327819481212, 19140654913421439); - assert_eq!(r, transmute(lsx_vaddwod_h_bu_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwod_h_bu_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4788,7 +6338,13 @@ unsafe fn test_lsx_vsubwev_d_w() { let b = i32x4::new(-2090701374, 629564229, -1170676885, 1069800209); let r = i64x2::new(4070621277, 63900397); - assert_eq!(r, transmute(lsx_vsubwev_d_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsubwev_d_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4797,7 +6353,13 @@ unsafe fn test_lsx_vsubwev_w_h() { let b = i16x8::new(-23957, 9416, -29569, -13210, 5333, 8420, 18648, -24201); let r = i64x2::new(228187317494294, -105188044063209); - assert_eq!(r, transmute(lsx_vsubwev_w_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsubwev_w_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4810,7 +6372,13 @@ unsafe fn test_lsx_vsubwev_h_b() { ); let r = i64x2::new(-41939247539617653, -14355228098887689); - assert_eq!(r, transmute(lsx_vsubwev_h_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsubwev_h_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4819,7 +6387,13 @@ 
unsafe fn test_lsx_vsubwod_d_w() { let b = i32x4::new(1436617964, -45524609, 502994793, -2039550077); let r = i64x2::new(-1037882987, 3497647797); - assert_eq!(r, transmute(lsx_vsubwod_d_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsubwod_d_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4828,7 +6402,13 @@ unsafe fn test_lsx_vsubwod_w_h() { let b = i16x8::new(-1276, 12669, 24115, 19617, -26739, 1910, -757, 23994); let r = i64x2::new(-158286724709540, -182411556002309); - assert_eq!(r, transmute(lsx_vsubwod_w_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsubwod_w_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4841,7 +6421,13 @@ unsafe fn test_lsx_vsubwod_h_b() { ); let r = i64x2::new(23925540523802608, 562958549909362); - assert_eq!(r, transmute(lsx_vsubwod_h_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsubwod_h_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4850,7 +6436,13 @@ unsafe fn test_lsx_vsubwev_d_wu() { let b = u32x4::new(1691253880, 1939268473, 1629937431, 2921768539); let r = i64x2::new(974418830, 1402878171); - assert_eq!(r, transmute(lsx_vsubwev_d_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsubwev_d_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4859,7 +6451,13 @@ unsafe fn test_lsx_vsubwev_w_hu() { let b = u16x8::new(15957, 42770, 43138, 30319, 50823, 18089, 64120, 18054); let r = i64x2::new(-41807211666923, -194858371266981); - assert_eq!(r, transmute(lsx_vsubwev_w_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsubwev_w_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4872,7 +6470,13 @@ unsafe fn test_lsx_vsubwev_h_bu() { ); let r = i64x2::new(-1407181617889293, 
47851128289689387); - assert_eq!(r, transmute(lsx_vsubwev_h_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsubwev_h_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4881,7 +6485,13 @@ unsafe fn test_lsx_vsubwod_d_wu() { let b = u32x4::new(103354715, 19070238, 1662532733, 3761231766); let r = i64x2::new(3487028338, -1512426824); - assert_eq!(r, transmute(lsx_vsubwod_d_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsubwod_d_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4890,7 +6500,13 @@ unsafe fn test_lsx_vsubwod_w_hu() { let b = u16x8::new(21739, 45406, 21733, 63910, 6659, 16020, 1211, 637); let r = i64x2::new(-93999654264447, 232211701825972); - assert_eq!(r, transmute(lsx_vsubwod_w_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsubwod_w_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4903,7 +6519,13 @@ unsafe fn test_lsx_vsubwod_h_bu() { ); let r = i64x2::new(-14355150803107815, 14636020195655765); - assert_eq!(r, transmute(lsx_vsubwod_h_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsubwod_h_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4912,7 +6534,13 @@ unsafe fn test_lsx_vaddwev_q_d() { let b = i64x2::new(6738886902337351868, -5985538541381931477); let r = i64x2::new(5606769623790009521, 0); - assert_eq!(r, transmute(lsx_vaddwev_q_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwev_q_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4921,7 +6549,13 @@ unsafe fn test_lsx_vaddwod_q_d() { let b = i64x2::new(-1244049724346527963, -3275029038845457041); let r = i64x2::new(-4417812606654001824, -1); - assert_eq!(r, transmute(lsx_vaddwod_q_d(transmute(a), transmute(b)))); + assert_eq!( + 
r, + transmute(lsx_vaddwod_q_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4930,7 +6564,13 @@ unsafe fn test_lsx_vaddwev_q_du() { let b = u64x2::new(6745766838534849346, 15041258018068294402); let r = i64x2::new(5074243625310689089, 1); - assert_eq!(r, transmute(lsx_vaddwev_q_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwev_q_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4939,7 +6579,13 @@ unsafe fn test_lsx_vaddwod_q_du() { let b = u64x2::new(13496765248439164553, 4640846570780442359); let r = i64x2::new(-2107214925415534967, 0); - assert_eq!(r, transmute(lsx_vaddwod_q_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwod_q_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4948,7 +6594,13 @@ unsafe fn test_lsx_vsubwev_q_d() { let b = i64x2::new(8029026411722387723, -2105201823388787841); let r = i64x2::new(480269655671735476, 0); - assert_eq!(r, transmute(lsx_vsubwev_q_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsubwev_q_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4957,7 +6609,13 @@ unsafe fn test_lsx_vsubwod_q_d() { let b = i64x2::new(5758437127240728961, 2933507971643343184); let r = i64x2::new(-8752278892998837291, -1); - assert_eq!(r, transmute(lsx_vsubwod_q_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsubwod_q_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4966,7 +6624,13 @@ unsafe fn test_lsx_vsubwev_q_du() { let b = u64x2::new(1574118313456291324, 7787456577305510529); let r = i64x2::new(-4672772323591679948, 0); - assert_eq!(r, transmute(lsx_vsubwev_q_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsubwev_q_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) 
+ ); } #[simd_test(enable = "lsx")] @@ -4975,7 +6639,13 @@ unsafe fn test_lsx_vsubwod_q_du() { let b = u64x2::new(5627376085113520030, 12775637764770549815); let r = i64x2::new(6257163948134922640, -1); - assert_eq!(r, transmute(lsx_vsubwod_q_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsubwod_q_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4984,7 +6654,13 @@ unsafe fn test_lsx_vaddwev_q_du_d() { let b = i64x2::new(-1159499132550683978, -4257322329662100669); let r = i64x2::new(-8502520416635627524, 0); - assert_eq!(r, transmute(lsx_vaddwev_q_du_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwev_q_du_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -4993,7 +6669,13 @@ unsafe fn test_lsx_vaddwod_q_du_d() { let b = i64x2::new(-3902573037873546881, 160140233311333524); let r = i64x2::new(286209858134078253, 0); - assert_eq!(r, transmute(lsx_vaddwod_q_du_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vaddwod_q_du_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5002,7 +6684,13 @@ unsafe fn test_lsx_vmulwev_d_w() { let b = i32x4::new(8741677, -276509855, -1214560052, -1338519080); let r = i64x2::new(11251431313755612, -2205748716678689436); - assert_eq!(r, transmute(lsx_vmulwev_d_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwev_d_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5011,7 +6699,13 @@ unsafe fn test_lsx_vmulwev_w_h() { let b = i16x8::new(30661, -20472, 1422, -16868, 4256, 9713, -27765, -7287); let r = i64x2::new(-178740441125036345, 469367082934888736); - assert_eq!(r, transmute(lsx_vmulwev_w_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwev_w_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable 
= "lsx")] @@ -5024,7 +6718,13 @@ unsafe fn test_lsx_vmulwev_h_b() { ); let r = i64x2::new(38855607073696482, 823864071118590255); - assert_eq!(r, transmute(lsx_vmulwev_h_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwev_h_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5033,7 +6733,13 @@ unsafe fn test_lsx_vmulwod_d_w() { let b = i32x4::new(63312847, -1377579771, -2054819244, -1416520586); let r = i64x2::new(1549708311038418702, 2478205834807109862); - assert_eq!(r, transmute(lsx_vmulwod_d_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwod_d_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5042,7 +6748,13 @@ unsafe fn test_lsx_vmulwod_w_h() { let b = i16x8::new(23748, 11912, 4946, -23048, 22372, 24702, -24875, -27771); let r = i64x2::new(3222038736804363232, 360450672278114574); - assert_eq!(r, transmute(lsx_vmulwod_w_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwod_w_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5055,7 +6767,13 @@ unsafe fn test_lsx_vmulwod_h_b() { ); let r = i64x2::new(-351280556043402912, 951366355207905332); - assert_eq!(r, transmute(lsx_vmulwod_h_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwod_h_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5064,7 +6782,13 @@ unsafe fn test_lsx_vmulwev_d_wu() { let b = u32x4::new(1769900227, 2256955703, 2342391995, 2407560006); let r = i64x2::new(3651844205567962921, 7772247680216328210); - assert_eq!(r, transmute(lsx_vmulwev_d_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwev_d_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5073,7 +6797,13 @@ unsafe fn test_lsx_vmulwev_w_hu() { let b = u16x8::new(20499, 45056, 
20580, 12771, 53914, 60742, 45402, 40547); let r = i64x2::new(4070644332601545987, 8033224333626513014); - assert_eq!(r, transmute(lsx_vmulwev_w_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwev_w_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5086,7 +6816,13 @@ unsafe fn test_lsx_vmulwev_h_bu() { ); let r = i64x2::new(271910110892810861, 1947809607093856504); - assert_eq!(r, transmute(lsx_vmulwev_h_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwev_h_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5095,7 +6831,13 @@ unsafe fn test_lsx_vmulwod_d_wu() { let b = u32x4::new(3750239707, 1422851626, 1277923597, 1377279439); let r = i64x2::new(2821622727533716246, 3005960862740149995); - assert_eq!(r, transmute(lsx_vmulwod_d_wu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwod_d_wu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5104,7 +6846,13 @@ unsafe fn test_lsx_vmulwod_w_hu() { let b = u16x8::new(38950, 5357, 36233, 17707, 61077, 61518, 5789, 13317); let r = i64x2::new(2460325445475503463, 3109522059894091248); - assert_eq!(r, transmute(lsx_vmulwod_w_hu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwod_w_hu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5117,7 +6865,13 @@ unsafe fn test_lsx_vmulwod_h_bu() { ); let r = i64x2::new(7364114643151226902, 6612146073643521312); - assert_eq!(r, transmute(lsx_vmulwod_h_bu(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwod_h_bu( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5126,7 +6880,13 @@ unsafe fn test_lsx_vmulwev_d_wu_w() { let b = i32x4::new(1254729285, 1938836163, -1902169358, -257980375); let r = i64x2::new(2295762833698990875, 
-6669027432954818262); - assert_eq!(r, transmute(lsx_vmulwev_d_wu_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwev_d_wu_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5135,7 +6895,13 @@ unsafe fn test_lsx_vmulwev_w_hu_h() { let b = i16x8::new(-30477, -10049, 16428, -30668, 21000, 24834, -3219, -9555); let r = i64x2::new(3369342936690107644, -701630285043265176); - assert_eq!(r, transmute(lsx_vmulwev_w_hu_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwev_w_hu_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5148,7 +6914,13 @@ unsafe fn test_lsx_vmulwev_h_bu_b() { ); let r = i64x2::new(-1134643098233554544, -1885853116779133038); - assert_eq!(r, transmute(lsx_vmulwev_h_bu_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwev_h_bu_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5157,7 +6929,13 @@ unsafe fn test_lsx_vmulwod_d_wu_w() { let b = i32x4::new(1204047391, -1970001586, 608763444, -2082771896); let r = i64x2::new(-5967343163181744876, -3673352984882804288); - assert_eq!(r, transmute(lsx_vmulwod_d_wu_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwod_d_wu_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5166,7 +6944,13 @@ unsafe fn test_lsx_vmulwod_w_hu_h() { let b = i16x8::new(-3735, -12972, -4920, 7170, 11577, 9785, 4896, -537); let r = i64x2::new(1024392868267999948, -48053790042385565); - assert_eq!(r, transmute(lsx_vmulwod_w_hu_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwod_w_hu_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5179,7 +6963,13 @@ unsafe fn test_lsx_vmulwod_h_bu_b() { ); let r = i64x2::new(1905300476090387090, -3940634277386171400); - assert_eq!(r, 
transmute(lsx_vmulwod_h_bu_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwod_h_bu_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5188,7 +6978,13 @@ unsafe fn test_lsx_vmulwev_q_d() { let b = i64x2::new(7023560313675997328, 4368639658790376608); let r = i64x2::new(-1409563343912029488, -2779799970834089134); - assert_eq!(r, transmute(lsx_vmulwev_q_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwev_q_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5197,7 +6993,13 @@ unsafe fn test_lsx_vmulwod_q_d() { let b = i64x2::new(1734538850547798281, 6505001633960390309); let r = i64x2::new(655114704133495137, -1013080750363369114); - assert_eq!(r, transmute(lsx_vmulwod_q_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwod_q_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5206,7 +7008,13 @@ unsafe fn test_lsx_vmulwev_q_du() { let b = u64x2::new(15048173707940873365, 13594773395779002998); let r = i64x2::new(-4049323972691826149, 6179334620527225413); - assert_eq!(r, transmute(lsx_vmulwev_q_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwev_q_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5215,7 +7023,13 @@ unsafe fn test_lsx_vmulwod_q_du() { let b = u64x2::new(16172423495582959833, 11676106279348566952); let r = i64x2::new(-66293137947075128, 3694303051148166412); - assert_eq!(r, transmute(lsx_vmulwod_q_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwod_q_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5224,7 +7038,13 @@ unsafe fn test_lsx_vmulwev_q_du_d() { let b = i64x2::new(-7071166739782294817, 8496829998090419991); let r = i64x2::new(5234431817964974175, -5931105679667820544); - 
assert_eq!(r, transmute(lsx_vmulwev_q_du_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwev_q_du_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5233,7 +7053,13 @@ unsafe fn test_lsx_vmulwod_q_du_d() { let b = i64x2::new(-9085162554263782091, -3351642387065053502); let r = i64x2::new(-3119502026085414102, -1153233394465180223); - assert_eq!(r, transmute(lsx_vmulwod_q_du_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vmulwod_q_du_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5242,7 +7068,13 @@ unsafe fn test_lsx_vhaddw_q_d() { let b = i64x2::new(9222966760421493517, -8347454331188625422); let r = i64x2::new(6438946365641244151, 0); - assert_eq!(r, transmute(lsx_vhaddw_q_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vhaddw_q_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5251,7 +7083,13 @@ unsafe fn test_lsx_vhaddw_qu_du() { let b = u64x2::new(2141387370256045519, 12417156199252644485); let r = i64x2::new(5083013417816990364, 0); - assert_eq!(r, transmute(lsx_vhaddw_qu_du(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vhaddw_qu_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5260,7 +7098,13 @@ unsafe fn test_lsx_vhsubw_q_d() { let b = i64x2::new(-3245503809142406078, 8660213762027125085); let r = i64x2::new(817818278178354941, 0); - assert_eq!(r, transmute(lsx_vhsubw_q_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vhsubw_q_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5269,7 +7113,13 @@ unsafe fn test_lsx_vhsubw_qu_du() { let b = u64x2::new(3098179646743711521, 11374525358855478565); let r = i64x2::new(-8990580109137044958, 0); - assert_eq!(r, transmute(lsx_vhsubw_qu_du(transmute(a), 
transmute(b)))); + assert_eq!( + r, + transmute(lsx_vhsubw_qu_du( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5281,7 +7131,11 @@ unsafe fn test_lsx_vmaddwev_d_w() { assert_eq!( r, - transmute(lsx_vmaddwev_d_w(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmaddwev_d_w( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5294,7 +7148,11 @@ unsafe fn test_lsx_vmaddwev_w_h() { assert_eq!( r, - transmute(lsx_vmaddwev_w_h(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmaddwev_w_h( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5311,7 +7169,11 @@ unsafe fn test_lsx_vmaddwev_h_b() { assert_eq!( r, - transmute(lsx_vmaddwev_h_b(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmaddwev_h_b( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5324,7 +7186,11 @@ unsafe fn test_lsx_vmaddwev_d_wu() { assert_eq!( r, - transmute(lsx_vmaddwev_d_wu(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmaddwev_d_wu( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5337,7 +7203,11 @@ unsafe fn test_lsx_vmaddwev_w_hu() { assert_eq!( r, - transmute(lsx_vmaddwev_w_hu(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmaddwev_w_hu( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5354,7 +7224,11 @@ unsafe fn test_lsx_vmaddwev_h_bu() { assert_eq!( r, - transmute(lsx_vmaddwev_h_bu(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmaddwev_h_bu( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5367,7 +7241,11 @@ unsafe fn test_lsx_vmaddwod_d_w() { assert_eq!( r, - transmute(lsx_vmaddwod_d_w(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmaddwod_d_w( + black_box(transmute(a)), + black_box(transmute(b)), + 
black_box(transmute(c)) + )) ); } @@ -5380,7 +7258,11 @@ unsafe fn test_lsx_vmaddwod_w_h() { assert_eq!( r, - transmute(lsx_vmaddwod_w_h(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmaddwod_w_h( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5397,7 +7279,11 @@ unsafe fn test_lsx_vmaddwod_h_b() { assert_eq!( r, - transmute(lsx_vmaddwod_h_b(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmaddwod_h_b( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5410,7 +7296,11 @@ unsafe fn test_lsx_vmaddwod_d_wu() { assert_eq!( r, - transmute(lsx_vmaddwod_d_wu(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmaddwod_d_wu( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5423,7 +7313,11 @@ unsafe fn test_lsx_vmaddwod_w_hu() { assert_eq!( r, - transmute(lsx_vmaddwod_w_hu(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmaddwod_w_hu( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5440,7 +7334,11 @@ unsafe fn test_lsx_vmaddwod_h_bu() { assert_eq!( r, - transmute(lsx_vmaddwod_h_bu(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmaddwod_h_bu( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5454,9 +7352,9 @@ unsafe fn test_lsx_vmaddwev_d_wu_w() { assert_eq!( r, transmute(lsx_vmaddwev_d_wu_w( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -5471,9 +7369,9 @@ unsafe fn test_lsx_vmaddwev_w_hu_h() { assert_eq!( r, transmute(lsx_vmaddwev_w_hu_h( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -5492,9 +7390,9 @@ unsafe fn test_lsx_vmaddwev_h_bu_b() { assert_eq!( r, transmute(lsx_vmaddwev_h_bu_b( - transmute(a), - transmute(b), - 
transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -5509,9 +7407,9 @@ unsafe fn test_lsx_vmaddwod_d_wu_w() { assert_eq!( r, transmute(lsx_vmaddwod_d_wu_w( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -5526,9 +7424,9 @@ unsafe fn test_lsx_vmaddwod_w_hu_h() { assert_eq!( r, transmute(lsx_vmaddwod_w_hu_h( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -5547,9 +7445,9 @@ unsafe fn test_lsx_vmaddwod_h_bu_b() { assert_eq!( r, transmute(lsx_vmaddwod_h_bu_b( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -5563,7 +7461,11 @@ unsafe fn test_lsx_vmaddwev_q_d() { assert_eq!( r, - transmute(lsx_vmaddwev_q_d(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmaddwev_q_d( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5576,7 +7478,11 @@ unsafe fn test_lsx_vmaddwod_q_d() { assert_eq!( r, - transmute(lsx_vmaddwod_q_d(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmaddwod_q_d( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5589,7 +7495,11 @@ unsafe fn test_lsx_vmaddwev_q_du() { assert_eq!( r, - transmute(lsx_vmaddwev_q_du(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmaddwev_q_du( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5602,7 +7512,11 @@ unsafe fn test_lsx_vmaddwod_q_du() { assert_eq!( r, - transmute(lsx_vmaddwod_q_du(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vmaddwod_q_du( + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -5616,9 +7530,9 @@ unsafe fn test_lsx_vmaddwev_q_du_d() { assert_eq!( r, 
transmute(lsx_vmaddwev_q_du_d( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -5633,9 +7547,9 @@ unsafe fn test_lsx_vmaddwod_q_du_d() { assert_eq!( r, transmute(lsx_vmaddwod_q_du_d( - transmute(a), - transmute(b), - transmute(c) + black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) )) ); } @@ -5650,7 +7564,13 @@ unsafe fn test_lsx_vrotr_b() { ); let r = i64x2::new(2841128540244802403, -8694309599374351908); - assert_eq!(r, transmute(lsx_vrotr_b(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vrotr_b( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5659,7 +7579,13 @@ unsafe fn test_lsx_vrotr_h() { let b = i16x8::new(-6485, 1418, 8263, -29872, -6491, 3930, -20621, 32531); let r = i64x2::new(2742461657407651598, 3308267577913279393); - assert_eq!(r, transmute(lsx_vrotr_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vrotr_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5668,7 +7594,13 @@ unsafe fn test_lsx_vrotr_w() { let b = i32x4::new(1956224189, -1858012941, -1889446514, -2130978943); let r = i64x2::new(6458469860191573231, -8548346292466177157); - assert_eq!(r, transmute(lsx_vrotr_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vrotr_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5677,7 +7609,13 @@ unsafe fn test_lsx_vrotr_d() { let b = i64x2::new(4553458262651691654, -5062393334123159235); let r = i64x2::new(-3594618648537251961, 7897385285240526033); - assert_eq!(r, transmute(lsx_vrotr_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vrotr_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -5686,7 +7624,10 @@ unsafe fn test_lsx_vadd_q() { let b = 
i64x2::new(114135477458514099, 3481307531297359399); let r = i64x2::new(2537705118259771652, 4159381110985057604); - assert_eq!(r, transmute(lsx_vadd_q(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vadd_q(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -5695,7 +7636,10 @@ unsafe fn test_lsx_vsub_q() { let b = i64x2::new(-8526086848853095438, -1323481969747305966); let r = i64x2::new(-2027679534337857341, -1789445478164204527); - assert_eq!(r, transmute(lsx_vsub_q(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vsub_q(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -5745,7 +7689,7 @@ unsafe fn test_lsx_vmskgez_b() { ); let r = i64x2::new(24930, 0); - assert_eq!(r, transmute(lsx_vmskgez_b(transmute(a)))); + assert_eq!(r, transmute(lsx_vmskgez_b(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -5755,7 +7699,7 @@ unsafe fn test_lsx_vmsknz_b() { ); let r = i64x2::new(65535, 0); - assert_eq!(r, transmute(lsx_vmsknz_b(transmute(a)))); + assert_eq!(r, transmute(lsx_vmsknz_b(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -5765,7 +7709,7 @@ unsafe fn test_lsx_vexth_h_b() { ); let r = i64x2::new(-3377613816397739, 32088276197572514); - assert_eq!(r, transmute(lsx_vexth_h_b(transmute(a)))); + assert_eq!(r, transmute(lsx_vexth_h_b(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -5773,7 +7717,7 @@ unsafe fn test_lsx_vexth_w_h() { let a = i16x8::new(14576, -26514, 14165, -15781, 10106, 1864, 23348, 30478); let r = i64x2::new(8005819049850, 130902013270836); - assert_eq!(r, transmute(lsx_vexth_w_h(transmute(a)))); + assert_eq!(r, transmute(lsx_vexth_w_h(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -5781,7 +7725,7 @@ unsafe fn test_lsx_vexth_d_w() { let a = i32x4::new(863783254, 799653326, -1122161877, -652869192); let r = i64x2::new(-1122161877, -652869192); - assert_eq!(r, 
transmute(lsx_vexth_d_w(transmute(a)))); + assert_eq!(r, transmute(lsx_vexth_d_w(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -5789,7 +7733,7 @@ unsafe fn test_lsx_vexth_q_d() { let a = i64x2::new(2924262436748867523, 1959694872821330818); let r = i64x2::new(1959694872821330818, 0); - assert_eq!(r, transmute(lsx_vexth_q_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vexth_q_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -5799,7 +7743,7 @@ unsafe fn test_lsx_vexth_hu_bu() { ); let r = i64x2::new(61080980486815914, 60235902725652628); - assert_eq!(r, transmute(lsx_vexth_hu_bu(transmute(a)))); + assert_eq!(r, transmute(lsx_vexth_hu_bu(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -5807,7 +7751,7 @@ unsafe fn test_lsx_vexth_wu_hu() { let a = u16x8::new(58875, 18924, 17611, 30197, 33869, 53931, 4693, 53025); let r = i64x2::new(231631881274445, 227740640875093); - assert_eq!(r, transmute(lsx_vexth_wu_hu(transmute(a)))); + assert_eq!(r, transmute(lsx_vexth_wu_hu(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -5815,7 +7759,7 @@ unsafe fn test_lsx_vexth_du_wu() { let a = u32x4::new(3499742961, 2840979237, 2082263829, 1096292547); let r = i64x2::new(2082263829, 1096292547); - assert_eq!(r, transmute(lsx_vexth_du_wu(transmute(a)))); + assert_eq!(r, transmute(lsx_vexth_du_wu(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -5823,7 +7767,7 @@ unsafe fn test_lsx_vexth_qu_du() { let a = u64x2::new(14170556367894986991, 14238702840099699193); let r = i64x2::new(-4208041233609852423, 0); - assert_eq!(r, transmute(lsx_vexth_qu_du(transmute(a)))); + assert_eq!(r, transmute(lsx_vexth_qu_du(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -5833,7 +7777,7 @@ unsafe fn test_lsx_vrotri_b() { ); let r = i64x2::new(-2919654548887155519, -96080239582005205); - assert_eq!(r, transmute(lsx_vrotri_b::<2>(transmute(a)))); + assert_eq!(r, transmute(lsx_vrotri_b::<2>(black_box(transmute(a))))); } 
#[simd_test(enable = "lsx")] @@ -5841,7 +7785,7 @@ unsafe fn test_lsx_vrotri_h() { let a = i16x8::new(-14120, -16812, -19570, -990, 24476, -7640, 20329, 8879); let r = i64x2::new(-556925602567188047, 4998607264501841720); - assert_eq!(r, transmute(lsx_vrotri_h::<15>(transmute(a)))); + assert_eq!(r, transmute(lsx_vrotri_h::<15>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -5849,7 +7793,7 @@ unsafe fn test_lsx_vrotri_w() { let a = i32x4::new(-1760224525, -1644621284, 1835781046, -1487934110); let r = i64x2::new(2845787365010917052, -6209343103231659283); - assert_eq!(r, transmute(lsx_vrotri_w::<2>(transmute(a)))); + assert_eq!(r, transmute(lsx_vrotri_w::<2>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -5857,7 +7801,7 @@ unsafe fn test_lsx_vrotri_d() { let a = i64x2::new(8884634342417174882, 244175985366916345); let r = i64x2::new(-3963790888197019724, 4020656082573561910); - assert_eq!(r, transmute(lsx_vrotri_d::<52>(transmute(a)))); + assert_eq!(r, transmute(lsx_vrotri_d::<52>(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -5865,7 +7809,7 @@ unsafe fn test_lsx_vextl_q_d() { let a = i64x2::new(-5110246490938885255, 377414780188285171); let r = i64x2::new(-5110246490938885255, -1); - assert_eq!(r, transmute(lsx_vextl_q_d(transmute(a)))); + assert_eq!(r, transmute(lsx_vextl_q_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -5880,7 +7824,10 @@ unsafe fn test_lsx_vsrlni_b_h() { assert_eq!( r, - transmute(lsx_vsrlni_b_h::<14>(transmute(a), transmute(b))) + transmute(lsx_vsrlni_b_h::<14>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -5892,7 +7839,10 @@ unsafe fn test_lsx_vsrlni_h_w() { assert_eq!( r, - transmute(lsx_vsrlni_h_w::<26>(transmute(a), transmute(b))) + transmute(lsx_vsrlni_h_w::<26>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -5904,7 +7854,10 @@ unsafe fn test_lsx_vsrlni_w_d() { assert_eq!( r, - transmute(lsx_vsrlni_w_d::<18>(transmute(a), transmute(b))) 
+ transmute(lsx_vsrlni_w_d::<18>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -5916,7 +7869,10 @@ unsafe fn test_lsx_vsrlni_d_q() { assert_eq!( r, - transmute(lsx_vsrlni_d_q::<74>(transmute(a), transmute(b))) + transmute(lsx_vsrlni_d_q::<74>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -5932,7 +7888,10 @@ unsafe fn test_lsx_vsrlrni_b_h() { assert_eq!( r, - transmute(lsx_vsrlrni_b_h::<6>(transmute(a), transmute(b))) + transmute(lsx_vsrlrni_b_h::<6>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -5944,7 +7903,10 @@ unsafe fn test_lsx_vsrlrni_h_w() { assert_eq!( r, - transmute(lsx_vsrlrni_h_w::<6>(transmute(a), transmute(b))) + transmute(lsx_vsrlrni_h_w::<6>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -5956,7 +7918,10 @@ unsafe fn test_lsx_vsrlrni_w_d() { assert_eq!( r, - transmute(lsx_vsrlrni_w_d::<52>(transmute(a), transmute(b))) + transmute(lsx_vsrlrni_w_d::<52>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -5968,7 +7933,10 @@ unsafe fn test_lsx_vsrlrni_d_q() { assert_eq!( r, - transmute(lsx_vsrlrni_d_q::<101>(transmute(a), transmute(b))) + transmute(lsx_vsrlrni_d_q::<101>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -5984,7 +7952,10 @@ unsafe fn test_lsx_vssrlni_b_h() { assert_eq!( r, - transmute(lsx_vssrlni_b_h::<13>(transmute(a), transmute(b))) + transmute(lsx_vssrlni_b_h::<13>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -5996,7 +7967,10 @@ unsafe fn test_lsx_vssrlni_h_w() { assert_eq!( r, - transmute(lsx_vssrlni_h_w::<23>(transmute(a), transmute(b))) + transmute(lsx_vssrlni_h_w::<23>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6008,7 +7982,10 @@ unsafe fn test_lsx_vssrlni_w_d() { assert_eq!( r, - transmute(lsx_vssrlni_w_d::<12>(transmute(a), transmute(b))) + transmute(lsx_vssrlni_w_d::<12>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6020,7 +7997,10 @@ unsafe fn 
test_lsx_vssrlni_d_q() { assert_eq!( r, - transmute(lsx_vssrlni_d_q::<88>(transmute(a), transmute(b))) + transmute(lsx_vssrlni_d_q::<88>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6036,7 +8016,10 @@ unsafe fn test_lsx_vssrlni_bu_h() { assert_eq!( r, - transmute(lsx_vssrlni_bu_h::<13>(transmute(a), transmute(b))) + transmute(lsx_vssrlni_bu_h::<13>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6048,7 +8031,10 @@ unsafe fn test_lsx_vssrlni_hu_w() { assert_eq!( r, - transmute(lsx_vssrlni_hu_w::<9>(transmute(a), transmute(b))) + transmute(lsx_vssrlni_hu_w::<9>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6060,7 +8046,10 @@ unsafe fn test_lsx_vssrlni_wu_d() { assert_eq!( r, - transmute(lsx_vssrlni_wu_d::<59>(transmute(a), transmute(b))) + transmute(lsx_vssrlni_wu_d::<59>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6072,7 +8061,10 @@ unsafe fn test_lsx_vssrlni_du_q() { assert_eq!( r, - transmute(lsx_vssrlni_du_q::<6>(transmute(a), transmute(b))) + transmute(lsx_vssrlni_du_q::<6>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6088,7 +8080,10 @@ unsafe fn test_lsx_vssrlrni_b_h() { assert_eq!( r, - transmute(lsx_vssrlrni_b_h::<0>(transmute(a), transmute(b))) + transmute(lsx_vssrlrni_b_h::<0>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6100,7 +8095,10 @@ unsafe fn test_lsx_vssrlrni_h_w() { assert_eq!( r, - transmute(lsx_vssrlrni_h_w::<28>(transmute(a), transmute(b))) + transmute(lsx_vssrlrni_h_w::<28>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6112,7 +8110,10 @@ unsafe fn test_lsx_vssrlrni_w_d() { assert_eq!( r, - transmute(lsx_vssrlrni_w_d::<1>(transmute(a), transmute(b))) + transmute(lsx_vssrlrni_w_d::<1>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6124,7 +8125,10 @@ unsafe fn test_lsx_vssrlrni_d_q() { assert_eq!( r, - transmute(lsx_vssrlrni_d_q::<60>(transmute(a), transmute(b))) + 
transmute(lsx_vssrlrni_d_q::<60>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6140,7 +8144,10 @@ unsafe fn test_lsx_vssrlrni_bu_h() { assert_eq!( r, - transmute(lsx_vssrlrni_bu_h::<13>(transmute(a), transmute(b))) + transmute(lsx_vssrlrni_bu_h::<13>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6152,7 +8159,10 @@ unsafe fn test_lsx_vssrlrni_hu_w() { assert_eq!( r, - transmute(lsx_vssrlrni_hu_w::<25>(transmute(a), transmute(b))) + transmute(lsx_vssrlrni_hu_w::<25>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6164,7 +8174,10 @@ unsafe fn test_lsx_vssrlrni_wu_d() { assert_eq!( r, - transmute(lsx_vssrlrni_wu_d::<36>(transmute(a), transmute(b))) + transmute(lsx_vssrlrni_wu_d::<36>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6176,7 +8189,10 @@ unsafe fn test_lsx_vssrlrni_du_q() { assert_eq!( r, - transmute(lsx_vssrlrni_du_q::<38>(transmute(a), transmute(b))) + transmute(lsx_vssrlrni_du_q::<38>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6192,7 +8208,10 @@ unsafe fn test_lsx_vsrani_b_h() { assert_eq!( r, - transmute(lsx_vsrani_b_h::<5>(transmute(a), transmute(b))) + transmute(lsx_vsrani_b_h::<5>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6204,7 +8223,10 @@ unsafe fn test_lsx_vsrani_h_w() { assert_eq!( r, - transmute(lsx_vsrani_h_w::<4>(transmute(a), transmute(b))) + transmute(lsx_vsrani_h_w::<4>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6216,7 +8238,10 @@ unsafe fn test_lsx_vsrani_w_d() { assert_eq!( r, - transmute(lsx_vsrani_w_d::<24>(transmute(a), transmute(b))) + transmute(lsx_vsrani_w_d::<24>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6228,7 +8253,10 @@ unsafe fn test_lsx_vsrani_d_q() { assert_eq!( r, - transmute(lsx_vsrani_d_q::<81>(transmute(a), transmute(b))) + transmute(lsx_vsrani_d_q::<81>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6244,7 +8272,10 @@ 
unsafe fn test_lsx_vsrarni_b_h() { assert_eq!( r, - transmute(lsx_vsrarni_b_h::<3>(transmute(a), transmute(b))) + transmute(lsx_vsrarni_b_h::<3>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6256,7 +8287,10 @@ unsafe fn test_lsx_vsrarni_h_w() { assert_eq!( r, - transmute(lsx_vsrarni_h_w::<15>(transmute(a), transmute(b))) + transmute(lsx_vsrarni_h_w::<15>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6268,7 +8302,10 @@ unsafe fn test_lsx_vsrarni_w_d() { assert_eq!( r, - transmute(lsx_vsrarni_w_d::<59>(transmute(a), transmute(b))) + transmute(lsx_vsrarni_w_d::<59>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6280,7 +8317,10 @@ unsafe fn test_lsx_vsrarni_d_q() { assert_eq!( r, - transmute(lsx_vsrarni_d_q::<0>(transmute(a), transmute(b))) + transmute(lsx_vsrarni_d_q::<0>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6296,7 +8336,10 @@ unsafe fn test_lsx_vssrani_b_h() { assert_eq!( r, - transmute(lsx_vssrani_b_h::<0>(transmute(a), transmute(b))) + transmute(lsx_vssrani_b_h::<0>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6308,7 +8351,10 @@ unsafe fn test_lsx_vssrani_h_w() { assert_eq!( r, - transmute(lsx_vssrani_h_w::<28>(transmute(a), transmute(b))) + transmute(lsx_vssrani_h_w::<28>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6320,7 +8366,10 @@ unsafe fn test_lsx_vssrani_w_d() { assert_eq!( r, - transmute(lsx_vssrani_w_d::<49>(transmute(a), transmute(b))) + transmute(lsx_vssrani_w_d::<49>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6332,7 +8381,10 @@ unsafe fn test_lsx_vssrani_d_q() { assert_eq!( r, - transmute(lsx_vssrani_d_q::<80>(transmute(a), transmute(b))) + transmute(lsx_vssrani_d_q::<80>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6348,7 +8400,10 @@ unsafe fn test_lsx_vssrani_bu_h() { assert_eq!( r, - transmute(lsx_vssrani_bu_h::<14>(transmute(a), transmute(b))) + 
transmute(lsx_vssrani_bu_h::<14>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6360,7 +8415,10 @@ unsafe fn test_lsx_vssrani_hu_w() { assert_eq!( r, - transmute(lsx_vssrani_hu_w::<23>(transmute(a), transmute(b))) + transmute(lsx_vssrani_hu_w::<23>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6372,7 +8430,10 @@ unsafe fn test_lsx_vssrani_wu_d() { assert_eq!( r, - transmute(lsx_vssrani_wu_d::<13>(transmute(a), transmute(b))) + transmute(lsx_vssrani_wu_d::<13>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6384,7 +8445,10 @@ unsafe fn test_lsx_vssrani_du_q() { assert_eq!( r, - transmute(lsx_vssrani_du_q::<33>(transmute(a), transmute(b))) + transmute(lsx_vssrani_du_q::<33>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6400,7 +8464,10 @@ unsafe fn test_lsx_vssrarni_b_h() { assert_eq!( r, - transmute(lsx_vssrarni_b_h::<2>(transmute(a), transmute(b))) + transmute(lsx_vssrarni_b_h::<2>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6412,7 +8479,10 @@ unsafe fn test_lsx_vssrarni_h_w() { assert_eq!( r, - transmute(lsx_vssrarni_h_w::<29>(transmute(a), transmute(b))) + transmute(lsx_vssrarni_h_w::<29>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6424,7 +8494,10 @@ unsafe fn test_lsx_vssrarni_w_d() { assert_eq!( r, - transmute(lsx_vssrarni_w_d::<18>(transmute(a), transmute(b))) + transmute(lsx_vssrarni_w_d::<18>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6436,7 +8509,10 @@ unsafe fn test_lsx_vssrarni_d_q() { assert_eq!( r, - transmute(lsx_vssrarni_d_q::<70>(transmute(a), transmute(b))) + transmute(lsx_vssrarni_d_q::<70>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6452,7 +8528,10 @@ unsafe fn test_lsx_vssrarni_bu_h() { assert_eq!( r, - transmute(lsx_vssrarni_bu_h::<14>(transmute(a), transmute(b))) + transmute(lsx_vssrarni_bu_h::<14>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ 
-6464,7 +8543,10 @@ unsafe fn test_lsx_vssrarni_hu_w() { assert_eq!( r, - transmute(lsx_vssrarni_hu_w::<13>(transmute(a), transmute(b))) + transmute(lsx_vssrarni_hu_w::<13>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6476,7 +8558,10 @@ unsafe fn test_lsx_vssrarni_wu_d() { assert_eq!( r, - transmute(lsx_vssrarni_wu_d::<15>(transmute(a), transmute(b))) + transmute(lsx_vssrarni_wu_d::<15>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6488,7 +8573,10 @@ unsafe fn test_lsx_vssrarni_du_q() { assert_eq!( r, - transmute(lsx_vssrarni_du_q::<126>(transmute(a), transmute(b))) + transmute(lsx_vssrarni_du_q::<126>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6500,7 +8588,10 @@ unsafe fn test_lsx_vpermi_w() { assert_eq!( r, - transmute(lsx_vpermi_w::<158>(transmute(a), transmute(b))) + transmute(lsx_vpermi_w::<158>( + black_box(transmute(a)), + black_box(transmute(b)) + )) ); } @@ -6524,7 +8615,7 @@ unsafe fn test_lsx_vst() { ]; let r = i64x2::new(4153633675232462821, -2083384694265299697); - lsx_vst::<0>(transmute(a), o.as_mut_ptr()); + lsx_vst::<0>(black_box(transmute(a)), o.as_mut_ptr()); assert_eq!(r, transmute(o)); } @@ -6534,7 +8625,13 @@ unsafe fn test_lsx_vssrlrn_b_h() { let b = i16x8::new(17437, 9775, -20467, -31838, 5913, 4238, -7458, 2822); let r = i64x2::new(5981906731171643399, 0); - assert_eq!(r, transmute(lsx_vssrlrn_b_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssrlrn_b_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6543,7 +8640,13 @@ unsafe fn test_lsx_vssrlrn_h_w() { let b = i32x4::new(-2116426818, 1641049288, 712377342, -1572394121); let r = i64x2::new(31243728857268226, 0); - assert_eq!(r, transmute(lsx_vssrlrn_h_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssrlrn_h_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6552,7 +8655,13 @@ unsafe 
fn test_lsx_vssrlrn_w_d() { let b = i64x2::new(-3890929847852895653, -7819301294522132056); let r = i64x2::new(66519777023098879, 0); - assert_eq!(r, transmute(lsx_vssrlrn_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssrlrn_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6561,7 +8670,13 @@ unsafe fn test_lsx_vssrln_b_h() { let b = i16x8::new(-14062, -29610, -24609, -8884, -1818, 32133, 29934, -6498); let r = i64x2::new(140183437672319, 0); - assert_eq!(r, transmute(lsx_vssrln_b_h(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssrln_b_h( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6570,7 +8685,13 @@ unsafe fn test_lsx_vssrln_h_w() { let b = i32x4::new(-1437891045, 1546371535, -1800954476, -1892390372); let r = i64x2::new(2820489990832156, 0); - assert_eq!(r, transmute(lsx_vssrln_h_w(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssrln_h_w( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6579,7 +8700,13 @@ unsafe fn test_lsx_vssrln_w_d() { let b = i64x2::new(2034490755997557661, -3470252066162700534); let r = i64x2::new(9223372034707292159, 0); - assert_eq!(r, transmute(lsx_vssrln_w_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vssrln_w_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6592,7 +8719,10 @@ unsafe fn test_lsx_vorn_v() { ); let r = i64x2::new(-883973744907789059, -2901520201165080862); - assert_eq!(r, transmute(lsx_vorn_v(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vorn_v(black_box(transmute(a)), black_box(transmute(b)))) + ); } #[simd_test(enable = "lsx")] @@ -6615,7 +8745,11 @@ unsafe fn test_lsx_vshuf_b() { assert_eq!( r, - transmute(lsx_vshuf_b(transmute(a), transmute(b), transmute(c))) + transmute(lsx_vshuf_b( + 
black_box(transmute(a)), + black_box(transmute(b)), + black_box(transmute(c)) + )) ); } @@ -6639,7 +8773,7 @@ unsafe fn test_lsx_vstx() { ]; let r = i64x2::new(-1493444417618012559, 7191635320606490850); - lsx_vstx(transmute(a), o.as_mut_ptr(), 0); + lsx_vstx(black_box(transmute(a)), o.as_mut_ptr(), 0); assert_eq!(r, transmute(o)); } @@ -6648,7 +8782,7 @@ unsafe fn test_lsx_vextl_qu_du() { let a = u64x2::new(14708598110732796778, 2132245682694336458); let r = i64x2::new(-3738145962976754838, 0); - assert_eq!(r, transmute(lsx_vextl_qu_du(transmute(a)))); + assert_eq!(r, transmute(lsx_vextl_qu_du(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -6658,7 +8792,7 @@ unsafe fn test_lsx_bnz_b() { ); let r: i32 = 1; - assert_eq!(r, transmute(lsx_bnz_b(transmute(a)))); + assert_eq!(r, transmute(lsx_bnz_b(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -6666,7 +8800,7 @@ unsafe fn test_lsx_bnz_d() { let a = u64x2::new(2935166648440262530, 9853932033129373129); let r: i32 = 1; - assert_eq!(r, transmute(lsx_bnz_d(transmute(a)))); + assert_eq!(r, transmute(lsx_bnz_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -6674,7 +8808,7 @@ unsafe fn test_lsx_bnz_h() { let a = u16x8::new(55695, 60003, 59560, 35123, 25693, 41352, 61626, 42007); let r: i32 = 1; - assert_eq!(r, transmute(lsx_bnz_h(transmute(a)))); + assert_eq!(r, transmute(lsx_bnz_h(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -6684,7 +8818,7 @@ unsafe fn test_lsx_bnz_v() { ); let r: i32 = 1; - assert_eq!(r, transmute(lsx_bnz_v(transmute(a)))); + assert_eq!(r, transmute(lsx_bnz_v(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -6692,7 +8826,7 @@ unsafe fn test_lsx_bnz_w() { let a = u32x4::new(1172712391, 4211490091, 1954893853, 1606462106); let r: i32 = 1; - assert_eq!(r, transmute(lsx_bnz_w(transmute(a)))); + assert_eq!(r, transmute(lsx_bnz_w(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -6702,7 +8836,7 @@ unsafe fn test_lsx_bz_b() { 
); let r: i32 = 0; - assert_eq!(r, transmute(lsx_bz_b(transmute(a)))); + assert_eq!(r, transmute(lsx_bz_b(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -6710,7 +8844,7 @@ unsafe fn test_lsx_bz_d() { let a = u64x2::new(6051854163594201075, 9957257179760945130); let r: i32 = 0; - assert_eq!(r, transmute(lsx_bz_d(transmute(a)))); + assert_eq!(r, transmute(lsx_bz_d(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -6718,7 +8852,7 @@ unsafe fn test_lsx_bz_h() { let a = u16x8::new(19470, 29377, 53886, 60432, 20799, 41755, 54479, 52192); let r: i32 = 0; - assert_eq!(r, transmute(lsx_bz_h(transmute(a)))); + assert_eq!(r, transmute(lsx_bz_h(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -6728,7 +8862,7 @@ unsafe fn test_lsx_bz_v() { ); let r: i32 = 0; - assert_eq!(r, transmute(lsx_bz_v(transmute(a)))); + assert_eq!(r, transmute(lsx_bz_v(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -6736,7 +8870,7 @@ unsafe fn test_lsx_bz_w() { let a = u32x4::new(840335855, 1404686204, 628335401, 1171808080); let r: i32 = 0; - assert_eq!(r, transmute(lsx_bz_w(transmute(a)))); + assert_eq!(r, transmute(lsx_bz_w(black_box(transmute(a))))); } #[simd_test(enable = "lsx")] @@ -6745,7 +8879,13 @@ unsafe fn test_lsx_vfcmp_caf_d() { let b = u64x2::new(4594845432849836188, 4605165420863530034); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_caf_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_caf_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6754,7 +8894,13 @@ unsafe fn test_lsx_vfcmp_caf_s() { let b = u32x4::new(1058412800, 1058762495, 1028487696, 1027290752); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_caf_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_caf_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6763,7 +8909,13 @@ unsafe fn 
test_lsx_vfcmp_ceq_d() { let b = u64x2::new(4605937250150464526, 4596769502461699132); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_ceq_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_ceq_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6772,7 +8924,13 @@ unsafe fn test_lsx_vfcmp_ceq_s() { let b = u32x4::new(1057471620, 1064008655, 1062698831, 1064822930); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_ceq_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_ceq_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6781,7 +8939,13 @@ unsafe fn test_lsx_vfcmp_cle_d() { let b = u64x2::new(4596931282408842596, 4592481315209481584); let r = i64x2::new(-1, 0); - assert_eq!(r, transmute(lsx_vfcmp_cle_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_cle_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6790,7 +8954,13 @@ unsafe fn test_lsx_vfcmp_cle_s() { let b = u32x4::new(1021993344, 1043028808, 1064182329, 1054794412); let r = i64x2::new(-4294967296, -1); - assert_eq!(r, transmute(lsx_vfcmp_cle_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_cle_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6799,7 +8969,13 @@ unsafe fn test_lsx_vfcmp_clt_d() { let b = u64x2::new(4603056125735978454, 4595932368389116476); let r = i64x2::new(-1, -1); - assert_eq!(r, transmute(lsx_vfcmp_clt_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_clt_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6808,7 +8984,13 @@ unsafe fn test_lsx_vfcmp_clt_s() { let b = u32x4::new(1040327468, 1040072248, 1063314103, 1061361061); let r = i64x2::new(0, -1); - assert_eq!(r, 
transmute(lsx_vfcmp_clt_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_clt_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6817,7 +8999,13 @@ unsafe fn test_lsx_vfcmp_cne_d() { let b = u64x2::new(4602354759349431170, 4598595124838935466); let r = i64x2::new(-1, -1); - assert_eq!(r, transmute(lsx_vfcmp_cne_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_cne_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6826,7 +9014,13 @@ unsafe fn test_lsx_vfcmp_cne_s() { let b = u32x4::new(1063262940, 1058010357, 1052721962, 1061295988); let r = i64x2::new(-1, -1); - assert_eq!(r, transmute(lsx_vfcmp_cne_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_cne_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6835,7 +9029,13 @@ unsafe fn test_lsx_vfcmp_cor_d() { let b = u64x2::new(4606863361114437050, 4600753700959452152); let r = i64x2::new(-1, -1); - assert_eq!(r, transmute(lsx_vfcmp_cor_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_cor_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6844,7 +9044,13 @@ unsafe fn test_lsx_vfcmp_cor_s() { let b = u32x4::new(1053615382, 1065255138, 1051565294, 1041776832); let r = i64x2::new(-1, -1); - assert_eq!(r, transmute(lsx_vfcmp_cor_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_cor_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6853,7 +9059,13 @@ unsafe fn test_lsx_vfcmp_cueq_d() { let b = u64x2::new(4603317345052528721, 4586734343919602352); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_cueq_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_cueq_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } 
#[simd_test(enable = "lsx")] @@ -6862,7 +9074,13 @@ unsafe fn test_lsx_vfcmp_cueq_s() { let b = u32x4::new(1057082822, 1059761998, 1052599998, 1054369118); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_cueq_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_cueq_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6871,7 +9089,13 @@ unsafe fn test_lsx_vfcmp_cule_d() { let b = u64x2::new(4604253448175093958, 4599648167588382448); let r = i64x2::new(-1, -1); - assert_eq!(r, transmute(lsx_vfcmp_cule_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_cule_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6880,7 +9104,13 @@ unsafe fn test_lsx_vfcmp_cule_s() { let b = u32x4::new(1051100696, 1062219104, 1064568294, 1032521352); let r = i64x2::new(-4294967296, 4294967295); - assert_eq!(r, transmute(lsx_vfcmp_cule_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_cule_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6889,7 +9119,13 @@ unsafe fn test_lsx_vfcmp_cult_d() { let b = u64x2::new(4602944708025910986, 4606429728449082215); let r = i64x2::new(0, -1); - assert_eq!(r, transmute(lsx_vfcmp_cult_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_cult_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6898,7 +9134,13 @@ unsafe fn test_lsx_vfcmp_cult_s() { let b = u32x4::new(1030808384, 1044268840, 1050761328, 1037308928); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_cult_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_cult_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6907,7 +9149,13 @@ unsafe fn test_lsx_vfcmp_cun_d() { let b = u64x2::new(4599145506416791474, 
4602762942707610466); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_cun_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_cun_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6916,7 +9164,13 @@ unsafe fn test_lsx_vfcmp_cune_d() { let b = u64x2::new(4602895209237804084, 4598685577984089858); let r = i64x2::new(-1, -1); - assert_eq!(r, transmute(lsx_vfcmp_cune_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_cune_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6925,7 +9179,13 @@ unsafe fn test_lsx_vfcmp_cune_s() { let b = u32x4::new(1049955876, 1032474200, 1023410112, 1050347912); let r = i64x2::new(-1, -1); - assert_eq!(r, transmute(lsx_vfcmp_cune_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_cune_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6934,7 +9194,13 @@ unsafe fn test_lsx_vfcmp_cun_s() { let b = u32x4::new(1053288920, 1059911123, 1058695573, 1062913175); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_cun_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_cun_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6943,7 +9209,13 @@ unsafe fn test_lsx_vfcmp_saf_d() { let b = u64x2::new(4589118818065931376, 4603302333347826011); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_saf_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_saf_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6952,7 +9224,13 @@ unsafe fn test_lsx_vfcmp_saf_s() { let b = u32x4::new(1044756936, 1054667546, 1059141760, 1062203553); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_saf_s(transmute(a), transmute(b)))); + assert_eq!( + r, + 
transmute(lsx_vfcmp_saf_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6961,7 +9239,13 @@ unsafe fn test_lsx_vfcmp_seq_d() { let b = u64x2::new(4594167956310606988, 4596272126122589228); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_seq_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_seq_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6970,7 +9254,13 @@ unsafe fn test_lsx_vfcmp_seq_s() { let b = u32x4::new(1057231588, 1051495460, 1057998997, 1049117328); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_seq_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_seq_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6979,7 +9269,13 @@ unsafe fn test_lsx_vfcmp_sle_d() { let b = u64x2::new(4603919005855163252, 4594682846653946884); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_sle_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_sle_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6988,7 +9284,13 @@ unsafe fn test_lsx_vfcmp_sle_s() { let b = u32x4::new(1045989468, 1052518900, 1046184640, 1032417352); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_sle_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_sle_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -6997,7 +9299,13 @@ unsafe fn test_lsx_vfcmp_slt_d() { let b = u64x2::new(4600564867142526828, 4585131890265864544); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_slt_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_slt_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -7006,7 +9314,13 @@ unsafe fn test_lsx_vfcmp_slt_s() { let 
b = u32x4::new(1063435026, 1062439603, 1060665555, 1059252630); let r = i64x2::new(-1, -4294967296); - assert_eq!(r, transmute(lsx_vfcmp_slt_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_slt_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -7015,7 +9329,13 @@ unsafe fn test_lsx_vfcmp_sne_d() { let b = u64x2::new(4606789952952688555, 4605380358192261377); let r = i64x2::new(-1, -1); - assert_eq!(r, transmute(lsx_vfcmp_sne_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_sne_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -7024,7 +9344,13 @@ unsafe fn test_lsx_vfcmp_sne_s() { let b = u32x4::new(1055803760, 1063372602, 1062608900, 1054634370); let r = i64x2::new(-1, -1); - assert_eq!(r, transmute(lsx_vfcmp_sne_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_sne_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -7033,7 +9359,13 @@ unsafe fn test_lsx_vfcmp_sor_d() { let b = u64x2::new(4606380175568635560, 4602092067387067462); let r = i64x2::new(-1, -1); - assert_eq!(r, transmute(lsx_vfcmp_sor_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_sor_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -7042,7 +9374,13 @@ unsafe fn test_lsx_vfcmp_sor_s() { let b = u32x4::new(1064534350, 1035771168, 1059142426, 1034677600); let r = i64x2::new(-1, -1); - assert_eq!(r, transmute(lsx_vfcmp_sor_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_sor_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -7051,7 +9389,13 @@ unsafe fn test_lsx_vfcmp_sueq_d() { let b = u64x2::new(4602917609947054533, 4605983209212177197); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_sueq_d(transmute(a), 
transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_sueq_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -7060,7 +9404,13 @@ unsafe fn test_lsx_vfcmp_sueq_s() { let b = u32x4::new(1064871165, 1059796257, 1055456352, 1058662692); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_sueq_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_sueq_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -7069,7 +9419,13 @@ unsafe fn test_lsx_vfcmp_sule_d() { let b = u64x2::new(4594044173266256632, 4601549551994738386); let r = i64x2::new(0, -1); - assert_eq!(r, transmute(lsx_vfcmp_sule_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_sule_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -7078,7 +9434,13 @@ unsafe fn test_lsx_vfcmp_sule_s() { let b = u32x4::new(1061061244, 1051874412, 1041025316, 1056018690); let r = i64x2::new(4294967295, -1); - assert_eq!(r, transmute(lsx_vfcmp_sule_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_sule_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -7087,7 +9449,13 @@ unsafe fn test_lsx_vfcmp_sult_d() { let b = u64x2::new(4603848042095479627, 4605032971316970060); let r = i64x2::new(-1, -1); - assert_eq!(r, transmute(lsx_vfcmp_sult_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_sult_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -7096,7 +9464,13 @@ unsafe fn test_lsx_vfcmp_sult_s() { let b = u32x4::new(1053631630, 1064026599, 1058029398, 1041182304); let r = i64x2::new(-4294967296, 4294967295); - assert_eq!(r, transmute(lsx_vfcmp_sult_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_sult_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); 
} #[simd_test(enable = "lsx")] @@ -7105,7 +9479,13 @@ unsafe fn test_lsx_vfcmp_sun_d() { let b = u64x2::new(4560681020073292800, 4604624347352815433); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_sun_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_sun_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -7114,7 +9494,13 @@ unsafe fn test_lsx_vfcmp_sune_d() { let b = u64x2::new(4593947987798339484, 4603656097008761637); let r = i64x2::new(-1, -1); - assert_eq!(r, transmute(lsx_vfcmp_sune_d(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_sune_d( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -7123,7 +9509,13 @@ unsafe fn test_lsx_vfcmp_sune_s() { let b = u32x4::new(1049327168, 1034635272, 1042258196, 1062844003); let r = i64x2::new(-1, -1); - assert_eq!(r, transmute(lsx_vfcmp_sune_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_sune_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] @@ -7132,7 +9524,13 @@ unsafe fn test_lsx_vfcmp_sun_s() { let b = u32x4::new(1057442863, 1064573466, 1058086753, 1015993248); let r = i64x2::new(0, 0); - assert_eq!(r, transmute(lsx_vfcmp_sun_s(transmute(a), transmute(b)))); + assert_eq!( + r, + transmute(lsx_vfcmp_sun_s( + black_box(transmute(a)), + black_box(transmute(b)) + )) + ); } #[simd_test(enable = "lsx")] diff --git a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs index fe767fc30917d..3a946a12d6619 100644 --- a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs +++ b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs @@ -847,6 +847,7 @@ union v4df out.push_str(" printf(\" core_arch::{loongarch64::*, simd::*},\\n\");\n"); out.push_str(" printf(\" mem::transmute,\\n\");\n"); out.push_str(" printf(\"};\\n\");\n"); + 
out.push_str(" printf(\"use std::hint::black_box;\\n\");\n"); out.push_str(" printf(\"use stdarch_test::simd_test;\\n\");\n"); out.push_str(&call_function_str); out.push_str(" return 0;\n"); @@ -1323,10 +1324,10 @@ fn gen_test_body( _ => "unsupported parameter number".to_string(), }; let mut as_params = match para_num { - 1 => "(transmute(a))".to_string(), - 2 => "(transmute(a), transmute(b))".to_string(), - 3 => "(transmute(a), transmute(b), transmute(c))".to_string(), - 4 => "(transmute(a), transmute(b), transmute(c), transmute(d))".to_string(), + 1 => "(black_box(transmute(a)))".to_string(), + 2 => "(black_box(transmute(a)), black_box(transmute(b)))".to_string(), + 3 => "(black_box(transmute(a)), black_box(transmute(b)), black_box(transmute(c)))".to_string(), + 4 => "(black_box(transmute(a)), black_box(transmute(b)), black_box(transmute(c)), black_box(transmute(d)))".to_string(), _ => panic!("unsupported parameter number"), }; let mut as_args = String::new(); @@ -1356,9 +1357,9 @@ fn gen_test_body( { fn_params = "(a)".to_string(); if in_t[0] == "SI" { - as_params = "(%d)".to_string(); + as_params = "(black_box(%d))".to_string(); } else { - as_params = "(%ld)".to_string(); + as_params = "(black_box(%ld))".to_string(); } as_args = ", a".to_string(); } else if para_num == 2 && (in_t[1] == "UQI" || in_t[1] == "USI") { @@ -1370,7 +1371,7 @@ fn gen_test_body( ); let val = rand_u32(asm_fmts[2].get(2..).unwrap().parse::().unwrap()); fn_params = format!("(a.v, {val})"); - as_params = format!("::<{val}>(transmute(a))"); + as_params = format!("::<{val}>(black_box(transmute(a)))"); } else { panic!("unsupported assembly format: {}", asm_fmts[2]); } @@ -1383,13 +1384,13 @@ fn gen_test_body( ); let val = rand_i32(asm_fmts[2].get(2..).unwrap().parse::().unwrap()); fn_params = format!("(a.v, {val})"); - as_params = format!("::<{val}>(transmute(a))"); + as_params = format!("::<{val}>(black_box(transmute(a)))"); } else { panic!("unsupported assembly format: {}", asm_fmts[2]); } } 
else if para_num == 2 && in_t[1] == "SI" && asm_fmts[2].starts_with("rk") { fn_params = "(a.v, b)".to_string(); - as_params = "(transmute(a), %d)".to_string(); + as_params = "(black_box(transmute(a)), %d)".to_string(); as_args = ", b".to_string(); } else if para_num == 2 && in_t[0] == "CVPOINTER" && in_t[1] == "SI" { if asm_fmts[2].starts_with("si") { @@ -1441,7 +1442,7 @@ fn gen_test_body( let ival = rand_i32(32); let uval = rand_u32(asm_fmts[2].get(2..).unwrap().parse::().unwrap()); fn_params = format!("(a.v, {ival}, {uval})"); - as_params = format!("::<{uval}>(transmute(a), {ival})"); + as_params = format!("::<{uval}>(black_box(transmute(a)), {ival})"); } else { panic!("unsupported assembly format: {}", asm_fmts[2]); } @@ -1456,7 +1457,7 @@ fn gen_test_body( ); let val = rand_u32(asm_fmts[2].get(2..).unwrap().parse::().unwrap()); fn_params = format!("(a.v, b.v, {val})"); - as_params = format!("::<{val}>(transmute(a), transmute(b))"); + as_params = format!("::<{val}>(black_box(transmute(a)), black_box(transmute(b)))"); } else { panic!("unsupported assembly format: {}", asm_fmts[2]); } @@ -1478,7 +1479,7 @@ fn gen_test_body( type_to_ct(in_t[1]) ); fn_params = "(a.v, b, 0)".to_string(); - as_params = "::<0>(transmute(a), o.as_mut_ptr())".to_string(); + as_params = "::<0>(black_box(transmute(a)), o.as_mut_ptr())".to_string(); } else { panic!("unsupported assembly format: {}", asm_fmts[2]); } @@ -1500,7 +1501,7 @@ fn gen_test_body( type_to_ct(in_t[1]) ); fn_params = "(a.v, b, 0)".to_string(); - as_params = "(transmute(a), o.as_mut_ptr(), 0)".to_string(); + as_params = "(black_box(transmute(a)), o.as_mut_ptr(), 0)".to_string(); } else { panic!("unsupported assembly format: {}", asm_fmts[2]); } @@ -1524,7 +1525,7 @@ fn gen_test_body( ); let val = rand_u32(type_to_imm(t).try_into().unwrap()); fn_params = format!("(a.v, b, 0, {val})"); - as_params = format!("::<0, {val}>(transmute(a), o.as_mut_ptr())"); + as_params = format!("::<0, {val}>(black_box(transmute(a)), 
o.as_mut_ptr())"); } (_, _) => panic!( "unsupported assembly format: {} for {}", From cd061c73afab36f3ecd6df4c46064c56551117ab Mon Sep 17 00:00:00 2001 From: Jules Bertholet Date: Sat, 11 Apr 2026 15:19:20 +0000 Subject: [PATCH 27/64] Extend `core::char`'s documentation of casing issues * Extend `core::char`'s documentation of casing issues * Fix typos * Fix typo Co-authored-by: GrigorenkoPV * Document maximum 3x character expansion This is guaranteed by Unicode. * Fix error in `str` casing method docs --- library/alloc/src/str.rs | 19 +++- library/core/src/char/methods.rs | 161 ++++++++++++++++++++++++++++--- 2 files changed, 161 insertions(+), 19 deletions(-) diff --git a/library/alloc/src/str.rs b/library/alloc/src/str.rs index 8a3326c7d76a7..d7dd616fce776 100644 --- a/library/alloc/src/str.rs +++ b/library/alloc/src/str.rs @@ -335,13 +335,19 @@ impl str { /// Returns the lowercase equivalent of this string slice, as a new [`String`]. /// - /// 'Lowercase' is defined according to the terms of the Unicode Derived Core Property - /// `Lowercase`. + /// 'Lowercase' is defined according to the terms of + /// [Chapter 3 (Conformance)](https://www.unicode.org/versions/latest/core-spec/chapter-3/#G34432) + /// of the Unicode standard. /// /// Since some characters can expand into multiple characters when changing /// the case, this function returns a [`String`] instead of modifying the /// parameter in-place. /// + /// Unlike [`char::to_lowercase()`], this method fully handles the context-dependent + /// casing of Greek sigma. However, like that method, it does not handle locale-specific + /// casing, like Turkish and Azeri I/ı/İ/i. See that method's documentation + /// for more information. + /// /// # Examples /// /// Basic usage: @@ -426,13 +432,18 @@ impl str { /// Returns the uppercase equivalent of this string slice, as a new [`String`]. /// - /// 'Uppercase' is defined according to the terms of the Unicode Derived Core Property - /// `Uppercase`. 
+ /// 'Uppercase' is defined according to the terms of + /// [Chapter 3 (Conformance)](https://www.unicode.org/versions/latest/core-spec/chapter-3/#G34431) + /// of the Unicode standard. /// /// Since some characters can expand into multiple characters when changing /// the case, this function returns a [`String`] instead of modifying the /// parameter in-place. /// + /// Like [`char::to_uppercase()`] this method does not handle language-specific + /// casing, like Turkish and Azeri I/ı/İ/i. See that method's documentation + /// for more information. + /// /// # Examples /// /// Basic usage: diff --git a/library/core/src/char/methods.rs b/library/core/src/char/methods.rs index 46d48afbf5a14..27567e8cd3c14 100644 --- a/library/core/src/char/methods.rs +++ b/library/core/src/char/methods.rs @@ -1149,13 +1149,14 @@ impl char { /// [ucd]: https://www.unicode.org/reports/tr44/ /// [`UnicodeData.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt /// - /// If this `char` requires special considerations (e.g. multiple `char`s) the iterator yields - /// the `char`(s) given by [`SpecialCasing.txt`]. + /// If this `char` expands to multiple `char`s, the iterator yields the `char`s given by + /// [`SpecialCasing.txt`]. The maximum number of `char`s in a case mapping is 3. /// /// [`SpecialCasing.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/SpecialCasing.txt /// /// This operation performs an unconditional mapping without tailoring. That is, the conversion - /// is independent of context and language. + /// is independent of context and language. See [below](#notes-on-context-and-locale) + /// for more information. /// /// In the [Unicode Standard], Chapter 4 (Character Properties) discusses case mapping in /// general and Chapter 3 (Conformance) discusses the default algorithm for case conversion. @@ -1197,6 +1198,48 @@ impl char { /// // convert into themselves. 
/// assert_eq!('山'.to_lowercase().to_string(), "山"); /// ``` + /// # Notes on context and locale + /// + /// As stated earlier, this method does not take into account language or context. + /// Below is a non-exhaustive list of situations where this can be relevant. + /// If you need to handle locale-depedendent casing in your code, consider using + /// an external crate, like [`icu_casemap`](https://crates.io/crates/icu_casemap) + /// which is developed by Unicode. + /// + /// ## Greek sigma + /// + /// In Greek, the letter simga (uppercase Σ) has two lowercase forms: + /// ς which is used only at the end of a word, and σ which is used everywhere else. + /// `to_lowercase()` always uses the second form: + /// + /// ``` + /// assert_eq!('Σ'.to_lowercase().to_string(), "σ"); + /// ``` + /// + /// ## Turkish and Azeri I/ı/İ/i + /// + /// In Turkish and Azeri, the equivalent of 'i' in Latin has five forms instead of two: + /// + /// * 'Dotless': I / ı, sometimes written ï + /// * 'Dotted': İ / i + /// + /// Note that the uppercase undotted 'I' is the same as the Latin. Therefore: + /// + /// ``` + /// let lower_i = 'I'.to_lowercase().to_string(); + /// ``` + /// + /// The value of `lower_i` here relies on the language of the text: if we're + /// in `en-US`, it should be `"i"`, but if we're in `tr-TR` or `az-AZ`, it should + /// be `"ı"`. `to_lowercase()` does not take this into account, and so: + /// + /// ``` + /// let lower_i = 'I'.to_lowercase().to_string(); + /// + /// assert_eq!(lower_i, "i"); + /// ``` + /// + /// holds across languages. #[must_use = "this returns the lowercased character as a new iterator, \ without modifying the original"] #[stable(feature = "rust1", since = "1.0.0")] @@ -1209,8 +1252,10 @@ impl char { /// `char`s. /// /// This is usually, but not always, equivalent to the uppercase mapping - /// returned by [`Self::to_uppercase`]. 
Prefer this method when seeking to capitalize - /// Only The First Letter of a word, but use [`Self::to_uppercase`] for ALL CAPS. + /// returned by [`to_uppercase()`]. Prefer this method when seeking to capitalize + /// Only The First Letter of a word, but use [`to_uppercase()`] for ALL CAPS. + /// See [below](#difference-from-uppercase) for a thorough explanation + /// of the difference between the two methods. /// /// If this `char` does not have a titlecase mapping, the iterator yields the same `char`. /// @@ -1220,13 +1265,14 @@ impl char { /// [ucd]: https://www.unicode.org/reports/tr44/ /// [`UnicodeData.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt /// - /// If this `char` requires special considerations (e.g. multiple `char`s) the iterator yields - /// the `char`(s) given by [`SpecialCasing.txt`]. + /// If this `char` expands to multiple `char`s, the iterator yields the `char`s given by + /// [`SpecialCasing.txt`]. The maximum number of `char`s in a case mapping is 3. /// /// [`SpecialCasing.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/SpecialCasing.txt /// /// This operation performs an unconditional mapping without tailoring. That is, the conversion - /// is independent of context and language. + /// is independent of context and language. See [below](#note-on-locale) + /// for more information. /// /// In the [Unicode Standard], Chapter 4 (Character Properties) discusses case mapping in /// general and Chapter 3 (Conformance) discusses the default algorithm for case conversion. 
@@ -1263,8 +1309,9 @@ impl char { /// ``` /// #![feature(titlecase)] /// assert_eq!('c'.to_titlecase().to_string(), "C"); + /// assert_eq!('ა'.to_titlecase().to_string(), "ა"); /// assert_eq!('dž'.to_titlecase().to_string(), "Dž"); - /// assert_eq!('ῼ'.to_titlecase().to_string(), "ῼ"); + /// assert_eq!('ᾨ'.to_titlecase().to_string(), "ᾨ"); /// /// // Sometimes the result is more than one character: /// assert_eq!('ß'.to_titlecase().to_string(), "Ss"); @@ -1274,8 +1321,78 @@ impl char { /// assert_eq!('山'.to_titlecase().to_string(), "山"); /// ``` /// + /// # Difference from uppercase + /// + /// Currently, there are three classes of characters where [`to_uppercase()`] + /// and `to_titlecase()` give different results: + /// + /// ## Georgian script + /// + /// Each letter in the modern Georgian alphabet can be written in one of two forms: + /// the typical lowercase-like "mkhedruli" form, and a variant uppercase-like "mtavruli" + /// form. However, unlike uppercase in most cased scripts, mtavruli is not typically used + /// to start sentences, denote proper nouns, or for any other purpose + /// in running text. It is instead confined to titles and headings, which are written entirely + /// in mtavruli. For this reason, [`to_uppercase()`] applied to a Georgian letter + /// will return the mtavruli form, but `to_titlecase()` will return the mkhedruli form. + /// + /// ``` + /// #![feature(titlecase)] + /// let ani = 'ა'; // First letter of the Georgian alphabet, in mkhedruli form + /// + /// // Titlecasing mkhedruli maps it to itself... + /// assert_eq!(ani.to_titlecase().to_string(), ani.to_string()); + /// + /// // but uppercasing it maps it to mtavruli + /// assert_eq!(ani.to_uppercase().to_string(), "Ა"); + /// ``` + /// + /// ## Compatibility digraphs for Latin-alphabet Serbo-Croatian + /// + /// The standard Latin alphabet for the Serbo-Croatian language + /// (Bosnian, Croatian, Montenegrin, and Serbian) contains + /// three digraphs: Dž, Lj, and Nj. 
These are usually represented as + /// two characters. However, for compatibility with older character sets, + /// Unicode includes single-character versions of these digraphs. + /// Each has a uppercase, titlecase, and lowercase version: + /// + /// - `'DŽ'`, `'Dž'`, `'dž'` + /// - `'LJ'`, `'Lj'`, `'lj'` + /// - `'NJ'`, `'Nj'`, `'nj'` + /// + /// Unicode additionally encodes a casing triad for the Dz digraph + /// without the caron: `'DZ'`, `'Dz'`, `'dz'`. + /// + /// ## Iota-subscritped Greek vowels + /// + /// In ancient Greek, the long vowels alpha (α), eta (η), and omega (ω) + /// were sometimes followed by an iota (ι), forming a diphthong. Over time, + /// the diphthong pronunciation was slowly lost, with the iota becoming mute. + /// Eventually, the ι disappeared from the spelling as well. + /// However, there remains a need to represent ancient texts faithfully. + /// + /// Modern editions of ancient Greek texts commonly use a reduced-sized + /// ι symbol to denote mute iotas, while distinguishing them from ιs + /// which continued to affect pronunciation. The exact standard differs + /// between different publications. Some render the mute ι below its associated + /// vowel (subscript), while others place it to the right of said vowel (adscript). + /// The interaction of mute ι symbols with casing also varies. + /// + /// The Unicode Standard, for its default casing rules, chose to make lowercase + /// Greek vowels with iota subscipt (e.g. `'ᾠ'`) titlecase to the uppercase vowel + /// with iota subscript (`'ᾨ'`) but uppercase to the uppercase vowel followed by + /// full-size uppercase iota (`"ὨΙ"`). This is just one convention among many + /// in common use, but it is the one Unicode settled on, + /// so it is what this method does also. + /// /// # Note on locale /// + /// As stated above, this method is locale-insensitive. 
+ /// If you need locale support, consider using an external crate, + /// like [`icu_casemap`](https://crates.io/crates/icu_casemap) + /// which is developed by Unicode. A description of a common + /// locale-dependent casing issue follows: + /// /// In Turkish and Azeri, the equivalent of 'i' in Latin has five forms instead of two: /// /// * 'Dotless': I / ı, sometimes written ï @@ -1300,6 +1417,8 @@ impl char { /// ``` /// /// holds across languages. + /// + /// [`to_uppercase()`]: Self::to_uppercase() #[must_use = "this returns the titlecased character as a new iterator, \ without modifying the original"] #[unstable(feature = "titlecase", issue = "153892")] @@ -1311,8 +1430,9 @@ impl char { /// Returns an iterator that yields the uppercase mapping of this `char` as one or more /// `char`s. /// - /// Prefer this method when converting a word into ALL CAPS, but consider [`Self::to_titlecase`] - /// instead if you seek to capitalize Only The First Letter. + /// Prefer this method when converting a word into ALL CAPS, but consider [`to_titlecase()`] + /// instead if you seek to capitalize Only The First Letter. See that method's documentation + /// for more information on the difference between the two. /// /// If this `char` does not have an uppercase mapping, the iterator yields the same `char`. /// @@ -1322,13 +1442,14 @@ impl char { /// [ucd]: https://www.unicode.org/reports/tr44/ /// [`UnicodeData.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt /// - /// If this `char` requires special considerations (e.g. multiple `char`s) the iterator yields - /// the `char`(s) given by [`SpecialCasing.txt`]. + /// If this `char` expands to multiple `char`s, the iterator yields the `char`s given by + /// [`SpecialCasing.txt`]. The maximum number of `char`s in a case mapping is 3. /// /// [`SpecialCasing.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/SpecialCasing.txt /// /// This operation performs an unconditional mapping without tailoring. 
That is, the conversion - /// is independent of context and language. + /// is independent of context and language. See [below](#note-on-locale) + /// for more information. /// /// In the [Unicode Standard], Chapter 4 (Character Properties) discusses case mapping in /// general and Chapter 3 (Conformance) discusses the default algorithm for case conversion. @@ -1336,6 +1457,7 @@ impl char { /// [Unicode Standard]: https://www.unicode.org/versions/latest/ /// /// # Examples + /// /// `'ſt'` (U+FB05) is a single Unicode code point (a ligature) that maps to "ST" in uppercase. /// /// As an iterator: @@ -1363,11 +1485,12 @@ impl char { /// /// ``` /// assert_eq!('c'.to_uppercase().to_string(), "C"); + /// assert_eq!('ა'.to_uppercase().to_string(), "Ა"); /// assert_eq!('dž'.to_uppercase().to_string(), "DŽ"); /// /// // Sometimes the result is more than one character: /// assert_eq!('ſt'.to_uppercase().to_string(), "ST"); - /// assert_eq!('ῼ'.to_uppercase().to_string(), "ΩΙ"); + /// assert_eq!('ᾨ'.to_uppercase().to_string(), "ὨΙ"); /// /// // Characters that do not have both uppercase and lowercase /// // convert into themselves. @@ -1376,6 +1499,12 @@ impl char { /// /// # Note on locale /// + /// As stated above, this method is locale-insensitive. + /// If you need locale support, consider using an external crate, + /// like [`icu_casemap`](https://crates.io/crates/icu_casemap) + /// which is developed by Unicode. A description of a common + /// locale-dependent casing issue follows: + /// /// In Turkish and Azeri, the equivalent of 'i' in Latin has five forms instead of two: /// /// * 'Dotless': I / ı, sometimes written ï @@ -1398,6 +1527,8 @@ impl char { /// ``` /// /// holds across languages. 
+ /// + /// [`to_titlecase()`]: Self::to_titlecase() #[must_use = "this returns the uppercased character as a new iterator, \ without modifying the original"] #[stable(feature = "rust1", since = "1.0.0")] From 139f9648dd57ea7261839d712ac43e286c8d09d4 Mon Sep 17 00:00:00 2001 From: sayantn Date: Mon, 13 Apr 2026 00:06:34 +0530 Subject: [PATCH 28/64] Update SDE to v10.8.0 --- .../stdarch/ci/docker/x86_64-unknown-linux-gnu/Dockerfile | 2 +- library/stdarch/ci/docker/x86_64-unknown-linux-gnu/cpuid.def | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/library/stdarch/ci/docker/x86_64-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/x86_64-unknown-linux-gnu/Dockerfile index a357449d51e3d..17c6d25215aeb 100644 --- a/library/stdarch/ci/docker/x86_64-unknown-linux-gnu/Dockerfile +++ b/library/stdarch/ci/docker/x86_64-unknown-linux-gnu/Dockerfile @@ -12,7 +12,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ build-essential \ lld -RUN wget http://ci-mirrors.rust-lang.org/stdarch/sde-external-10.5.0-2026-01-13-lin.tar.xz -O sde.tar.xz +RUN wget http://ci-mirrors.rust-lang.org/sde-external-10.8.0-2026-03-15-lin.tar.xz -O sde.tar.xz RUN mkdir intel-sde RUN tar -xJf sde.tar.xz --strip-components=1 -C intel-sde ENV CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER="/intel-sde/sde64 \ diff --git a/library/stdarch/ci/docker/x86_64-unknown-linux-gnu/cpuid.def b/library/stdarch/ci/docker/x86_64-unknown-linux-gnu/cpuid.def index acf023ed0dc49..3bd657873e553 100644 --- a/library/stdarch/ci/docker/x86_64-unknown-linux-gnu/cpuid.def +++ b/library/stdarch/ci/docker/x86_64-unknown-linux-gnu/cpuid.def @@ -1,4 +1,4 @@ -# Copyright (C) 2017-2025 Intel Corporation. +# Copyright (C) 2017-2026 Intel Corporation. 
# # This software and the related documents are Intel copyrighted materials, and your # use of them is governed by the express license under which they were provided to @@ -23,8 +23,9 @@ 00000004 00000004 => 00000000 00000000 00000000 00000000 00000005 ******** => 00000040 00000040 00000003 00042120 #MONITOR/MWAIT 00000006 ******** => 00000077 00000002 00000001 00000000 #Thermal and Power -00000007 00000000 => 00000001 f3bfbfbf bac05ffe 03d54130 #Extended Features +00000007 00000000 => 00000002 f3bfbfbf bac05ffe 03d54130 #Extended Features 00000007 00000001 => 98ee00bf 00000002 00000020 1d29cd3e +00000007 00000002 => 00000000 00000000 00000000 00000010 00000008 ******** => 00000000 00000000 00000000 00000000 00000009 ******** => 00000000 00000000 00000000 00000000 #Direct Cache 0000000a ******** => 07300403 00000000 00000000 00000603 From 9f43e2a99f32bc11e9be16192450b3598030fbce Mon Sep 17 00:00:00 2001 From: David Wood Date: Mon, 2 Mar 2026 11:03:13 +0000 Subject: [PATCH 29/64] stdarch-verify: re-add runtime test check This was accidentally removed in 713444d. 
--- .../crates/stdarch-verify/tests/arm.rs | 4706 ++++++++++++++++- 1 file changed, 4482 insertions(+), 224 deletions(-) diff --git a/library/stdarch/crates/stdarch-verify/tests/arm.rs b/library/stdarch/crates/stdarch-verify/tests/arm.rs index 3ef9ce2a38b69..c5744de3f644b 100644 --- a/library/stdarch/crates/stdarch-verify/tests/arm.rs +++ b/library/stdarch/crates/stdarch-verify/tests/arm.rs @@ -183,230 +183,20 @@ fn verify_all_signatures() { let mut all_valid = true; for rust in FUNCTIONS { if !rust.has_test { - let skip = [ - "vaddq_s64", - "vaddq_u64", - "vrsqrte_f32", - "vtbl1_s8", - "vtbl1_u8", - "vtbl1_p8", - "vtbl2_s8", - "vtbl2_u8", - "vtbl2_p8", - "vtbl3_s8", - "vtbl3_u8", - "vtbl3_p8", - "vtbl4_s8", - "vtbl4_u8", - "vtbl4_p8", - "vtbx1_s8", - "vtbx1_u8", - "vtbx1_p8", - "vtbx2_s8", - "vtbx2_u8", - "vtbx2_p8", - "vtbx3_s8", - "vtbx3_u8", - "vtbx3_p8", - "vtbx4_s8", - "vtbx4_u8", - "vtbx4_p8", - "udf", - "_clz_u8", - "_clz_u16", - "_clz_u32", - "_rbit_u32", - "_rev_u16", - "_rev_u32", - "__breakpoint", - "vpminq_f32", - "vpminq_f64", - "vpmaxq_f32", - "vpmaxq_f64", - "vcombine_s8", - "vcombine_s16", - "vcombine_s32", - "vcombine_s64", - "vcombine_u8", - "vcombine_u16", - "vcombine_u32", - "vcombine_u64", - "vcombine_p64", - "vcombine_f32", - "vcombine_p8", - "vcombine_p16", - "vcombine_f64", - "vtbl1_s8", - "vtbl1_u8", - "vtbl1_p8", - "vtbl2_s8", - "vtbl2_u8", - "vtbl2_p8", - "vtbl3_s8", - "vtbl3_u8", - "vtbl3_p8", - "vtbl4_s8", - "vtbl4_u8", - "vtbl4_p8", - "vtbx1_s8", - "vtbx1_u8", - "vtbx1_p8", - "vtbx2_s8", - "vtbx2_u8", - "vtbx2_p8", - "vtbx3_s8", - "vtbx3_u8", - "vtbx3_p8", - "vtbx4_s8", - "vtbx4_u8", - "vtbx4_p8", - "vqtbl1_s8", - "vqtbl1q_s8", - "vqtbl1_u8", - "vqtbl1q_u8", - "vqtbl1_p8", - "vqtbl1q_p8", - "vqtbx1_s8", - "vqtbx1q_s8", - "vqtbx1_u8", - "vqtbx1q_u8", - "vqtbx1_p8", - "vqtbx1q_p8", - "vqtbl2_s8", - "vqtbl2q_s8", - "vqtbl2_u8", - "vqtbl2q_u8", - "vqtbl2_p8", - "vqtbl2q_p8", - "vqtbx2_s8", - "vqtbx2q_s8", - "vqtbx2_u8", - "vqtbx2q_u8", - 
"vqtbx2_p8", - "vqtbx2q_p8", - "vqtbl3_s8", - "vqtbl3q_s8", - "vqtbl3_u8", - "vqtbl3q_u8", - "vqtbl3_p8", - "vqtbl3q_p8", - "vqtbx3_s8", - "vqtbx3q_s8", - "vqtbx3_u8", - "vqtbx3q_u8", - "vqtbx3_p8", - "vqtbx3q_p8", - "vqtbl4_s8", - "vqtbl4q_s8", - "vqtbl4_u8", - "vqtbl4q_u8", - "vqtbl4_p8", - "vqtbl4q_p8", - "vqtbx4_s8", - "vqtbx4q_s8", - "vqtbx4_u8", - "vqtbx4q_u8", - "vqtbx4_p8", - "vqtbx4q_p8", - "brk", - "_rev_u64", - "_clz_u64", - "_rbit_u64", - "_cls_u32", - "_cls_u64", - "_prefetch", - "vsli_n_s8", - "vsliq_n_s8", - "vsli_n_s16", - "vsliq_n_s16", - "vsli_n_s32", - "vsliq_n_s32", - "vsli_n_s64", - "vsliq_n_s64", - "vsli_n_u8", - "vsliq_n_u8", - "vsli_n_u16", - "vsliq_n_u16", - "vsli_n_u32", - "vsliq_n_u32", - "vsli_n_u64", - "vsliq_n_u64", - "vsli_n_p8", - "vsliq_n_p8", - "vsli_n_p16", - "vsliq_n_p16", - "vsli_n_p64", - "vsliq_n_p64", - "vsri_n_s8", - "vsriq_n_s8", - "vsri_n_s16", - "vsriq_n_s16", - "vsri_n_s32", - "vsriq_n_s32", - "vsri_n_s64", - "vsriq_n_s64", - "vsri_n_u8", - "vsriq_n_u8", - "vsri_n_u16", - "vsriq_n_u16", - "vsri_n_u32", - "vsriq_n_u32", - "vsri_n_u64", - "vsriq_n_u64", - "vsri_n_p8", - "vsriq_n_p8", - "vsri_n_p16", - "vsriq_n_p16", - "vsri_n_p64", - "vsriq_n_p64", - "__smulbb", - "__smultb", - "__smulbt", - "__smultt", - "__smulwb", - "__smulwt", - "__qadd", - "__qsub", - "__qdbl", - "__smlabb", - "__smlabt", - "__smlatb", - "__smlatt", - "__smlawb", - "__smlawt", - "__qadd8", - "__qsub8", - "__qsub16", - "__qadd16", - "__qasx", - "__qsax", - "__sadd16", - "__sadd8", - "__smlad", - "__smlsd", - "__sasx", - "__sel", - "__shadd8", - "__shadd16", - "__shsub8", - "__usub8", - "__ssub8", - "__shsub16", - "__smuad", - "__smuadx", - "__smusd", - "__smusdx", - "__usad8", - "__usada8", - "__ldrex", - "__strex", - "__ldrexb", - "__strexb", - "__ldrexh", - "__strexh", - "__clrex", - "__dbg", - ]; + if !SKIP_RUNTIME_TESTS.contains(&rust.name) { + println!( + "missing run-time test named `test_{}` for `{}`", + { + let mut id = rust.name; + while 
id.starts_with('_') { + id = &id[1..]; + } + id + }, + rust.name + ); + all_valid = false; + } } // Skip some intrinsics that aren't NEON and are located in different @@ -743,3 +533,4471 @@ fn parse_ty_base(s: &str) -> &'static Type { _ => panic!("failed to parse json type {s:?}"), } } + +// FIXME(arm-maintainers): With the advent of the `intrinsic-test` tool, new tests of this kind +// are no longer being added and just adding to this list indefinitely isn't the best solution for +// dealing with that. +static SKIP_RUNTIME_TESTS: &'static [&'static str] = &[ + "vaddq_s64", + "vaddq_u64", + "vrsqrte_f32", + "vtbl1_s8", + "vtbl1_u8", + "vtbl1_p8", + "vtbl2_s8", + "vtbl2_u8", + "vtbl2_p8", + "vtbl3_s8", + "vtbl3_u8", + "vtbl3_p8", + "vtbl4_s8", + "vtbl4_u8", + "vtbl4_p8", + "vtbx1_s8", + "vtbx1_u8", + "vtbx1_p8", + "vtbx2_s8", + "vtbx2_u8", + "vtbx2_p8", + "vtbx3_s8", + "vtbx3_u8", + "vtbx3_p8", + "vtbx4_s8", + "vtbx4_u8", + "vtbx4_p8", + "udf", + "_clz_u8", + "_clz_u16", + "_clz_u32", + "_rbit_u32", + "_rev_u16", + "_rev_u32", + "__breakpoint", + "vpminq_f32", + "vpminq_f64", + "vpmaxq_f32", + "vpmaxq_f64", + "vcombine_s8", + "vcombine_s16", + "vcombine_s32", + "vcombine_s64", + "vcombine_u8", + "vcombine_u16", + "vcombine_u32", + "vcombine_u64", + "vcombine_p64", + "vcombine_f32", + "vcombine_p8", + "vcombine_p16", + "vcombine_f64", + "vtbl1_s8", + "vtbl1_u8", + "vtbl1_p8", + "vtbl2_s8", + "vtbl2_u8", + "vtbl2_p8", + "vtbl3_s8", + "vtbl3_u8", + "vtbl3_p8", + "vtbl4_s8", + "vtbl4_u8", + "vtbl4_p8", + "vtbx1_s8", + "vtbx1_u8", + "vtbx1_p8", + "vtbx2_s8", + "vtbx2_u8", + "vtbx2_p8", + "vtbx3_s8", + "vtbx3_u8", + "vtbx3_p8", + "vtbx4_s8", + "vtbx4_u8", + "vtbx4_p8", + "vqtbl1_s8", + "vqtbl1q_s8", + "vqtbl1_u8", + "vqtbl1q_u8", + "vqtbl1_p8", + "vqtbl1q_p8", + "vqtbx1_s8", + "vqtbx1q_s8", + "vqtbx1_u8", + "vqtbx1q_u8", + "vqtbx1_p8", + "vqtbx1q_p8", + "vqtbl2_s8", + "vqtbl2q_s8", + "vqtbl2_u8", + "vqtbl2q_u8", + "vqtbl2_p8", + "vqtbl2q_p8", + "vqtbx2_s8", + 
"vqtbx2q_s8", + "vqtbx2_u8", + "vqtbx2q_u8", + "vqtbx2_p8", + "vqtbx2q_p8", + "vqtbl3_s8", + "vqtbl3q_s8", + "vqtbl3_u8", + "vqtbl3q_u8", + "vqtbl3_p8", + "vqtbl3q_p8", + "vqtbx3_s8", + "vqtbx3q_s8", + "vqtbx3_u8", + "vqtbx3q_u8", + "vqtbx3_p8", + "vqtbx3q_p8", + "vqtbl4_s8", + "vqtbl4q_s8", + "vqtbl4_u8", + "vqtbl4q_u8", + "vqtbl4_p8", + "vqtbl4q_p8", + "vqtbx4_s8", + "vqtbx4q_s8", + "vqtbx4_u8", + "vqtbx4q_u8", + "vqtbx4_p8", + "vqtbx4q_p8", + "brk", + "_rev_u64", + "_clz_u64", + "_rbit_u64", + "_cls_u32", + "_cls_u64", + "_prefetch", + "vsli_n_s8", + "vsliq_n_s8", + "vsli_n_s16", + "vsliq_n_s16", + "vsli_n_s32", + "vsliq_n_s32", + "vsli_n_s64", + "vsliq_n_s64", + "vsli_n_u8", + "vsliq_n_u8", + "vsli_n_u16", + "vsliq_n_u16", + "vsli_n_u32", + "vsliq_n_u32", + "vsli_n_u64", + "vsliq_n_u64", + "vsli_n_p8", + "vsliq_n_p8", + "vsli_n_p16", + "vsliq_n_p16", + "vsli_n_p64", + "vsliq_n_p64", + "vsri_n_s8", + "vsriq_n_s8", + "vsri_n_s16", + "vsriq_n_s16", + "vsri_n_s32", + "vsriq_n_s32", + "vsri_n_s64", + "vsriq_n_s64", + "vsri_n_u8", + "vsriq_n_u8", + "vsri_n_u16", + "vsriq_n_u16", + "vsri_n_u32", + "vsriq_n_u32", + "vsri_n_u64", + "vsriq_n_u64", + "vsri_n_p8", + "vsriq_n_p8", + "vsri_n_p16", + "vsriq_n_p16", + "vsri_n_p64", + "vsriq_n_p64", + "__smulbb", + "__smultb", + "__smulbt", + "__smultt", + "__smulwb", + "__smulwt", + "__qadd", + "__qsub", + "__qdbl", + "__smlabb", + "__smlabt", + "__smlatb", + "__smlatt", + "__smlawb", + "__smlawt", + "__qadd8", + "__qsub8", + "__qsub16", + "__qadd16", + "__qasx", + "__qsax", + "__sadd16", + "__sadd8", + "__smlad", + "__smlsd", + "__sasx", + "__sel", + "__shadd8", + "__shadd16", + "__shsub8", + "__usub8", + "__ssub8", + "__shsub16", + "__smuad", + "__smuadx", + "__smusd", + "__smusdx", + "__usad8", + "__usada8", + "__ldrex", + "__strex", + "__ldrexb", + "__strexb", + "__ldrexh", + "__strexh", + "__clrex", + "__dbg", + "__crc32cd", + "__crc32d", + "__jcvt", + "vabal_high_s8", + "vabal_high_s16", + "vabal_high_s32", + 
"vabal_high_u8", + "vabal_high_u16", + "vabal_high_u32", + "vabd_f64", + "vabdq_f64", + "vabdd_f64", + "vabds_f32", + "vabdh_f16", + "vabdl_high_s16", + "vabdl_high_s32", + "vabdl_high_s8", + "vabdl_high_u8", + "vabdl_high_u16", + "vabdl_high_u32", + "vabs_f64", + "vabsq_f64", + "vabs_s64", + "vabsq_s64", + "vabsd_s64", + "vaddlv_s16", + "vaddlvq_s16", + "vaddlvq_s32", + "vaddlv_s32", + "vaddlv_s8", + "vaddlvq_s8", + "vaddlv_u16", + "vaddlvq_u16", + "vaddlvq_u32", + "vaddlv_u32", + "vaddlv_u8", + "vaddlvq_u8", + "vaddv_f32", + "vaddvq_f32", + "vaddvq_f64", + "vaddv_s32", + "vaddv_s8", + "vaddvq_s8", + "vaddv_s16", + "vaddvq_s16", + "vaddvq_s32", + "vaddv_u32", + "vaddv_u8", + "vaddvq_u8", + "vaddv_u16", + "vaddvq_u16", + "vaddvq_u32", + "vaddvq_s64", + "vaddvq_u64", + "vamax_f16", + "vamaxq_f16", + "vamax_f32", + "vamaxq_f32", + "vamaxq_f64", + "vamin_f16", + "vaminq_f16", + "vamin_f32", + "vaminq_f32", + "vaminq_f64", + "vbcaxq_s8", + "vbcaxq_s16", + "vbcaxq_s32", + "vbcaxq_s64", + "vbcaxq_u8", + "vbcaxq_u16", + "vbcaxq_u32", + "vbcaxq_u64", + "vcadd_rot270_f16", + "vcaddq_rot270_f16", + "vcadd_rot270_f32", + "vcaddq_rot270_f32", + "vcaddq_rot270_f64", + "vcadd_rot90_f16", + "vcaddq_rot90_f16", + "vcadd_rot90_f32", + "vcaddq_rot90_f32", + "vcaddq_rot90_f64", + "vcage_f64", + "vcageq_f64", + "vcaged_f64", + "vcages_f32", + "vcageh_f16", + "vcagt_f64", + "vcagtq_f64", + "vcagtd_f64", + "vcagts_f32", + "vcagth_f16", + "vcale_f64", + "vcaleq_f64", + "vcaled_f64", + "vcales_f32", + "vcaleh_f16", + "vcalt_f64", + "vcaltq_f64", + "vcaltd_f64", + "vcalts_f32", + "vcalth_f16", + "vceq_f64", + "vceqq_f64", + "vceq_s64", + "vceqq_s64", + "vceq_u64", + "vceqq_u64", + "vceq_p64", + "vceqq_p64", + "vceqd_f64", + "vceqs_f32", + "vceqd_s64", + "vceqd_u64", + "vceqh_f16", + "vceqz_f16", + "vceqzq_f16", + "vceqz_f32", + "vceqzq_f32", + "vceqz_f64", + "vceqzq_f64", + "vceqz_s8", + "vceqzq_s8", + "vceqz_s16", + "vceqzq_s16", + "vceqz_s32", + "vceqzq_s32", + "vceqz_s64", + 
"vceqzq_s64", + "vceqz_p8", + "vceqzq_p8", + "vceqz_p64", + "vceqzq_p64", + "vceqz_u8", + "vceqzq_u8", + "vceqz_u16", + "vceqzq_u16", + "vceqz_u32", + "vceqzq_u32", + "vceqz_u64", + "vceqzq_u64", + "vceqzd_s64", + "vceqzd_u64", + "vceqzh_f16", + "vceqzs_f32", + "vceqzd_f64", + "vcge_f64", + "vcgeq_f64", + "vcge_s64", + "vcgeq_s64", + "vcge_u64", + "vcgeq_u64", + "vcged_f64", + "vcges_f32", + "vcged_s64", + "vcged_u64", + "vcgeh_f16", + "vcgez_f32", + "vcgezq_f32", + "vcgez_f64", + "vcgezq_f64", + "vcgez_s8", + "vcgezq_s8", + "vcgez_s16", + "vcgezq_s16", + "vcgez_s32", + "vcgezq_s32", + "vcgez_s64", + "vcgezq_s64", + "vcgezd_f64", + "vcgezs_f32", + "vcgezd_s64", + "vcgezh_f16", + "vcgt_f64", + "vcgtq_f64", + "vcgt_s64", + "vcgtq_s64", + "vcgt_u64", + "vcgtq_u64", + "vcgtd_f64", + "vcgts_f32", + "vcgtd_s64", + "vcgtd_u64", + "vcgth_f16", + "vcgtz_f32", + "vcgtzq_f32", + "vcgtz_f64", + "vcgtzq_f64", + "vcgtz_s8", + "vcgtzq_s8", + "vcgtz_s16", + "vcgtzq_s16", + "vcgtz_s32", + "vcgtzq_s32", + "vcgtz_s64", + "vcgtzq_s64", + "vcgtzd_f64", + "vcgtzs_f32", + "vcgtzd_s64", + "vcgtzh_f16", + "vcle_f64", + "vcleq_f64", + "vcle_s64", + "vcleq_s64", + "vcle_u64", + "vcleq_u64", + "vcled_f64", + "vcles_f32", + "vcled_u64", + "vcled_s64", + "vcleh_f16", + "vclez_f32", + "vclezq_f32", + "vclez_f64", + "vclezq_f64", + "vclez_s8", + "vclezq_s8", + "vclez_s16", + "vclezq_s16", + "vclez_s32", + "vclezq_s32", + "vclez_s64", + "vclezq_s64", + "vclezd_f64", + "vclezs_f32", + "vclezd_s64", + "vclezh_f16", + "vclt_f64", + "vcltq_f64", + "vclt_s64", + "vcltq_s64", + "vclt_u64", + "vcltq_u64", + "vcltd_u64", + "vcltd_s64", + "vclth_f16", + "vclts_f32", + "vcltd_f64", + "vcltz_f32", + "vcltzq_f32", + "vcltz_f64", + "vcltzq_f64", + "vcltz_s8", + "vcltzq_s8", + "vcltz_s16", + "vcltzq_s16", + "vcltz_s32", + "vcltzq_s32", + "vcltz_s64", + "vcltzq_s64", + "vcltzd_f64", + "vcltzs_f32", + "vcltzd_s64", + "vcltzh_f16", + "vcmla_f16", + "vcmlaq_f16", + "vcmla_f32", + "vcmlaq_f32", + "vcmlaq_f64", + 
"vcmla_lane_f16", + "vcmlaq_lane_f16", + "vcmla_lane_f32", + "vcmlaq_lane_f32", + "vcmla_laneq_f16", + "vcmlaq_laneq_f16", + "vcmla_laneq_f32", + "vcmlaq_laneq_f32", + "vcmla_rot180_f16", + "vcmlaq_rot180_f16", + "vcmla_rot180_f32", + "vcmlaq_rot180_f32", + "vcmlaq_rot180_f64", + "vcmla_rot180_lane_f16", + "vcmlaq_rot180_lane_f16", + "vcmla_rot180_lane_f32", + "vcmlaq_rot180_lane_f32", + "vcmla_rot180_laneq_f16", + "vcmlaq_rot180_laneq_f16", + "vcmla_rot180_laneq_f32", + "vcmlaq_rot180_laneq_f32", + "vcmla_rot270_f16", + "vcmlaq_rot270_f16", + "vcmla_rot270_f32", + "vcmlaq_rot270_f32", + "vcmlaq_rot270_f64", + "vcmla_rot270_lane_f16", + "vcmlaq_rot270_lane_f16", + "vcmla_rot270_lane_f32", + "vcmlaq_rot270_lane_f32", + "vcmla_rot270_laneq_f16", + "vcmlaq_rot270_laneq_f16", + "vcmla_rot270_laneq_f32", + "vcmlaq_rot270_laneq_f32", + "vcmla_rot90_f16", + "vcmlaq_rot90_f16", + "vcmla_rot90_f32", + "vcmlaq_rot90_f32", + "vcmlaq_rot90_f64", + "vcmla_rot90_lane_f16", + "vcmlaq_rot90_lane_f16", + "vcmla_rot90_lane_f32", + "vcmlaq_rot90_lane_f32", + "vcmla_rot90_laneq_f16", + "vcmlaq_rot90_laneq_f16", + "vcmla_rot90_laneq_f32", + "vcmlaq_rot90_laneq_f32", + "vcopy_lane_f32", + "vcopy_lane_s8", + "vcopy_lane_s16", + "vcopy_lane_s32", + "vcopy_lane_u8", + "vcopy_lane_u16", + "vcopy_lane_u32", + "vcopy_lane_p8", + "vcopy_lane_p16", + "vcopy_laneq_f32", + "vcopy_laneq_s8", + "vcopy_laneq_s16", + "vcopy_laneq_s32", + "vcopy_laneq_u8", + "vcopy_laneq_u16", + "vcopy_laneq_u32", + "vcopy_laneq_p8", + "vcopy_laneq_p16", + "vcopyq_lane_f32", + "vcopyq_lane_f64", + "vcopyq_lane_s64", + "vcopyq_lane_u64", + "vcopyq_lane_p64", + "vcopyq_lane_s8", + "vcopyq_lane_s16", + "vcopyq_lane_s32", + "vcopyq_lane_u8", + "vcopyq_lane_u16", + "vcopyq_lane_u32", + "vcopyq_lane_p8", + "vcopyq_lane_p16", + "vcopyq_laneq_f32", + "vcopyq_laneq_f64", + "vcopyq_laneq_s8", + "vcopyq_laneq_s16", + "vcopyq_laneq_s32", + "vcopyq_laneq_s64", + "vcopyq_laneq_u8", + "vcopyq_laneq_u16", + "vcopyq_laneq_u32", + 
"vcopyq_laneq_u64", + "vcopyq_laneq_p8", + "vcopyq_laneq_p16", + "vcopyq_laneq_p64", + "vcreate_f64", + "vcvt_f32_f64", + "vcvt_f64_f32", + "vcvt_f64_s64", + "vcvtq_f64_s64", + "vcvt_f64_u64", + "vcvtq_f64_u64", + "vcvt_high_f16_f32", + "vcvt_high_f32_f16", + "vcvt_high_f32_f64", + "vcvt_high_f64_f32", + "vcvt_n_f64_s64", + "vcvtq_n_f64_s64", + "vcvt_n_f64_u64", + "vcvtq_n_f64_u64", + "vcvt_n_s64_f64", + "vcvtq_n_s64_f64", + "vcvt_n_u64_f64", + "vcvtq_n_u64_f64", + "vcvt_s64_f64", + "vcvtq_s64_f64", + "vcvt_u64_f64", + "vcvtq_u64_f64", + "vcvta_s16_f16", + "vcvtaq_s16_f16", + "vcvta_s32_f32", + "vcvtaq_s32_f32", + "vcvta_s64_f64", + "vcvtaq_s64_f64", + "vcvta_u16_f16", + "vcvtaq_u16_f16", + "vcvta_u32_f32", + "vcvtaq_u32_f32", + "vcvta_u64_f64", + "vcvtaq_u64_f64", + "vcvtah_s16_f16", + "vcvtah_s32_f16", + "vcvtah_s64_f16", + "vcvtah_u16_f16", + "vcvtah_u32_f16", + "vcvtah_u64_f16", + "vcvtas_s32_f32", + "vcvtad_s64_f64", + "vcvtas_u32_f32", + "vcvtad_u64_f64", + "vcvtd_f64_s64", + "vcvts_f32_s32", + "vcvth_f16_s16", + "vcvth_f16_s32", + "vcvth_f16_s64", + "vcvth_f16_u16", + "vcvth_f16_u32", + "vcvth_f16_u64", + "vcvth_n_f16_s16", + "vcvth_n_f16_s32", + "vcvth_n_f16_s64", + "vcvth_n_f16_u16", + "vcvth_n_f16_u32", + "vcvth_n_f16_u64", + "vcvth_n_s16_f16", + "vcvth_n_s32_f16", + "vcvth_n_s64_f16", + "vcvth_n_u16_f16", + "vcvth_n_u32_f16", + "vcvth_n_u64_f16", + "vcvth_s16_f16", + "vcvth_s32_f16", + "vcvth_s64_f16", + "vcvth_u16_f16", + "vcvth_u32_f16", + "vcvth_u64_f16", + "vcvtm_s16_f16", + "vcvtmq_s16_f16", + "vcvtm_s32_f32", + "vcvtmq_s32_f32", + "vcvtm_s64_f64", + "vcvtmq_s64_f64", + "vcvtm_u16_f16", + "vcvtmq_u16_f16", + "vcvtm_u32_f32", + "vcvtmq_u32_f32", + "vcvtm_u64_f64", + "vcvtmq_u64_f64", + "vcvtmh_s16_f16", + "vcvtmh_s32_f16", + "vcvtmh_s64_f16", + "vcvtmh_u16_f16", + "vcvtmh_u32_f16", + "vcvtmh_u64_f16", + "vcvtms_s32_f32", + "vcvtmd_s64_f64", + "vcvtms_u32_f32", + "vcvtmd_u64_f64", + "vcvtn_s16_f16", + "vcvtnq_s16_f16", + "vcvtn_s32_f32", + 
"vcvtnq_s32_f32", + "vcvtn_s64_f64", + "vcvtnq_s64_f64", + "vcvtn_u16_f16", + "vcvtnq_u16_f16", + "vcvtn_u32_f32", + "vcvtnq_u32_f32", + "vcvtn_u64_f64", + "vcvtnq_u64_f64", + "vcvtnh_s16_f16", + "vcvtnh_s32_f16", + "vcvtnh_s64_f16", + "vcvtnh_u16_f16", + "vcvtnh_u32_f16", + "vcvtnh_u64_f16", + "vcvtns_s32_f32", + "vcvtnd_s64_f64", + "vcvtns_u32_f32", + "vcvtnd_u64_f64", + "vcvtp_s16_f16", + "vcvtpq_s16_f16", + "vcvtp_s32_f32", + "vcvtpq_s32_f32", + "vcvtp_s64_f64", + "vcvtpq_s64_f64", + "vcvtp_u16_f16", + "vcvtpq_u16_f16", + "vcvtp_u32_f32", + "vcvtpq_u32_f32", + "vcvtp_u64_f64", + "vcvtpq_u64_f64", + "vcvtph_s16_f16", + "vcvtph_s32_f16", + "vcvtph_s64_f16", + "vcvtph_u16_f16", + "vcvtph_u32_f16", + "vcvtph_u64_f16", + "vcvtps_s32_f32", + "vcvtpd_s64_f64", + "vcvtps_u32_f32", + "vcvtpd_u64_f64", + "vcvts_f32_u32", + "vcvtd_f64_u64", + "vcvts_n_f32_s32", + "vcvtd_n_f64_s64", + "vcvts_n_f32_u32", + "vcvtd_n_f64_u64", + "vcvts_n_s32_f32", + "vcvtd_n_s64_f64", + "vcvts_n_u32_f32", + "vcvtd_n_u64_f64", + "vcvts_s32_f32", + "vcvtd_s64_f64", + "vcvts_u32_f32", + "vcvtd_u64_f64", + "vcvtx_f32_f64", + "vcvtx_high_f32_f64", + "vcvtxd_f32_f64", + "vdiv_f16", + "vdivq_f16", + "vdiv_f32", + "vdivq_f32", + "vdiv_f64", + "vdivq_f64", + "vdivh_f16", + "vdup_lane_f64", + "vdup_lane_p64", + "vdup_laneq_f64", + "vdup_laneq_p64", + "vdupb_lane_s8", + "vduph_laneq_s16", + "vdupb_lane_u8", + "vduph_laneq_u16", + "vdupb_lane_p8", + "vduph_laneq_p16", + "vdupb_laneq_s8", + "vdupb_laneq_u8", + "vdupb_laneq_p8", + "vdupd_lane_f64", + "vdupd_lane_s64", + "vdupd_lane_u64", + "vduph_lane_f16", + "vduph_laneq_f16", + "vdupq_lane_f64", + "vdupq_lane_p64", + "vdupq_laneq_f64", + "vdupq_laneq_p64", + "vdups_lane_f32", + "vdupd_laneq_f64", + "vdups_lane_s32", + "vdupd_laneq_s64", + "vdups_lane_u32", + "vdupd_laneq_u64", + "vdups_laneq_f32", + "vduph_lane_s16", + "vdups_laneq_s32", + "vduph_lane_u16", + "vdups_laneq_u32", + "vduph_lane_p16", + "veor3q_s8", + "veor3q_s16", + "veor3q_s32", + 
"veor3q_s64", + "veor3q_u8", + "veor3q_u16", + "veor3q_u32", + "veor3q_u64", + "vextq_f64", + "vextq_p64", + "vfma_f64", + "vfma_lane_f16", + "vfma_laneq_f16", + "vfmaq_lane_f16", + "vfmaq_laneq_f16", + "vfma_lane_f32", + "vfma_laneq_f32", + "vfmaq_lane_f32", + "vfmaq_laneq_f32", + "vfmaq_laneq_f64", + "vfma_lane_f64", + "vfma_laneq_f64", + "vfma_n_f16", + "vfmaq_n_f16", + "vfma_n_f64", + "vfmad_lane_f64", + "vfmah_f16", + "vfmah_lane_f16", + "vfmah_laneq_f16", + "vfmaq_f64", + "vfmaq_lane_f64", + "vfmaq_n_f64", + "vfmas_lane_f32", + "vfmas_laneq_f32", + "vfmad_laneq_f64", + "vfmlal_high_f16", + "vfmlalq_high_f16", + "vfmlal_lane_high_f16", + "vfmlal_laneq_high_f16", + "vfmlalq_lane_high_f16", + "vfmlalq_laneq_high_f16", + "vfmlal_lane_low_f16", + "vfmlal_laneq_low_f16", + "vfmlalq_lane_low_f16", + "vfmlalq_laneq_low_f16", + "vfmlal_low_f16", + "vfmlalq_low_f16", + "vfmlsl_high_f16", + "vfmlslq_high_f16", + "vfmlsl_lane_high_f16", + "vfmlsl_laneq_high_f16", + "vfmlslq_lane_high_f16", + "vfmlslq_laneq_high_f16", + "vfmlsl_lane_low_f16", + "vfmlsl_laneq_low_f16", + "vfmlslq_lane_low_f16", + "vfmlslq_laneq_low_f16", + "vfmlsl_low_f16", + "vfmlslq_low_f16", + "vfms_f64", + "vfms_lane_f16", + "vfms_laneq_f16", + "vfmsq_lane_f16", + "vfmsq_laneq_f16", + "vfms_lane_f32", + "vfms_laneq_f32", + "vfmsq_lane_f32", + "vfmsq_laneq_f32", + "vfmsq_laneq_f64", + "vfms_lane_f64", + "vfms_laneq_f64", + "vfms_n_f16", + "vfmsq_n_f16", + "vfms_n_f64", + "vfmsh_f16", + "vfmsh_lane_f16", + "vfmsh_laneq_f16", + "vfmsq_f64", + "vfmsq_lane_f64", + "vfmsq_n_f64", + "vfmss_lane_f32", + "vfmss_laneq_f32", + "vfmsd_lane_f64", + "vfmsd_laneq_f64", + "vld1_f16", + "vld1q_f16", + "vld1_f64_x2", + "vld1_f64_x3", + "vld1_f64_x4", + "vld1q_f64_x2", + "vld1q_f64_x3", + "vld1q_f64_x4", + "vld2_dup_f64", + "vld2q_dup_f64", + "vld2q_dup_s64", + "vld2_f64", + "vld2_lane_f64", + "vld2_lane_s64", + "vld2_lane_p64", + "vld2_lane_u64", + "vld2q_dup_p64", + "vld2q_dup_p64", + "vld2q_dup_u64", + 
"vld2q_dup_u64", + "vld2q_f64", + "vld2q_s64", + "vld2q_lane_f64", + "vld2q_lane_s8", + "vld2q_lane_s64", + "vld2q_lane_p64", + "vld2q_lane_u8", + "vld2q_lane_u64", + "vld2q_lane_p8", + "vld2q_p64", + "vld2q_p64", + "vld2q_u64", + "vld3_dup_f64", + "vld3q_dup_f64", + "vld3q_dup_s64", + "vld3_f64", + "vld3_lane_f64", + "vld3_lane_p64", + "vld3_lane_s64", + "vld3_lane_u64", + "vld3q_dup_p64", + "vld3q_dup_p64", + "vld3q_dup_u64", + "vld3q_dup_u64", + "vld3q_f64", + "vld3q_s64", + "vld3q_lane_f64", + "vld3q_lane_p64", + "vld3q_lane_s8", + "vld3q_lane_s64", + "vld3q_lane_u8", + "vld3q_lane_u64", + "vld3q_lane_p8", + "vld3q_p64", + "vld3q_p64", + "vld3q_u64", + "vld4_dup_f64", + "vld4q_dup_f64", + "vld4q_dup_s64", + "vld4_f64", + "vld4_lane_f64", + "vld4_lane_s64", + "vld4_lane_p64", + "vld4_lane_u64", + "vld4q_dup_p64", + "vld4q_dup_p64", + "vld4q_dup_u64", + "vld4q_dup_u64", + "vld4q_f64", + "vld4q_s64", + "vld4q_lane_f64", + "vld4q_lane_s8", + "vld4q_lane_s64", + "vld4q_lane_p64", + "vld4q_lane_u8", + "vld4q_lane_u64", + "vld4q_lane_p8", + "vld4q_p64", + "vld4q_p64", + "vld4q_u64", + "vldap1_lane_s64", + "vldap1q_lane_s64", + "vldap1q_lane_f64", + "vldap1_lane_u64", + "vldap1q_lane_u64", + "vldap1_lane_p64", + "vldap1q_lane_p64", + "vluti2_lane_f16", + "vluti2q_lane_f16", + "vluti2_lane_u8", + "vluti2q_lane_u8", + "vluti2_lane_u16", + "vluti2q_lane_u16", + "vluti2_lane_p8", + "vluti2q_lane_p8", + "vluti2_lane_p16", + "vluti2q_lane_p16", + "vluti2_lane_s8", + "vluti2q_lane_s8", + "vluti2_lane_s16", + "vluti2q_lane_s16", + "vluti2_laneq_f16", + "vluti2q_laneq_f16", + "vluti2_laneq_u8", + "vluti2q_laneq_u8", + "vluti2_laneq_u16", + "vluti2q_laneq_u16", + "vluti2_laneq_p8", + "vluti2q_laneq_p8", + "vluti2_laneq_p16", + "vluti2q_laneq_p16", + "vluti2_laneq_s8", + "vluti2q_laneq_s8", + "vluti2_laneq_s16", + "vluti2q_laneq_s16", + "vluti4q_lane_f16_x2", + "vluti4q_lane_u16_x2", + "vluti4q_lane_p16_x2", + "vluti4q_lane_s16_x2", + "vluti4q_lane_s8", + "vluti4q_lane_u8", + 
"vluti4q_lane_p8", + "vluti4q_laneq_f16_x2", + "vluti4q_laneq_u16_x2", + "vluti4q_laneq_p16_x2", + "vluti4q_laneq_s16_x2", + "vluti4q_laneq_s8", + "vluti4q_laneq_u8", + "vluti4q_laneq_p8", + "vmax_f64", + "vmaxq_f64", + "vmaxh_f16", + "vmaxnm_f64", + "vmaxnmq_f64", + "vmaxnmh_f16", + "vmaxnmv_f16", + "vmaxnmvq_f16", + "vmaxnmv_f32", + "vmaxnmvq_f64", + "vmaxnmvq_f32", + "vmaxv_f16", + "vmaxvq_f16", + "vmaxv_f32", + "vmaxvq_f32", + "vmaxvq_f64", + "vmaxv_s8", + "vmaxvq_s8", + "vmaxv_s16", + "vmaxvq_s16", + "vmaxv_s32", + "vmaxvq_s32", + "vmaxv_u8", + "vmaxvq_u8", + "vmaxv_u16", + "vmaxvq_u16", + "vmaxv_u32", + "vmaxvq_u32", + "vmin_f64", + "vminq_f64", + "vminh_f16", + "vminnm_f64", + "vminnmq_f64", + "vminnmh_f16", + "vminnmv_f16", + "vminnmvq_f16", + "vminnmv_f32", + "vminnmvq_f64", + "vminnmvq_f32", + "vminv_f16", + "vminvq_f16", + "vminv_f32", + "vminvq_f32", + "vminvq_f64", + "vminv_s8", + "vminvq_s8", + "vminv_s16", + "vminvq_s16", + "vminv_s32", + "vminvq_s32", + "vminv_u8", + "vminvq_u8", + "vminv_u16", + "vminvq_u16", + "vminv_u32", + "vminvq_u32", + "vmla_f64", + "vmlaq_f64", + "vmlal_high_lane_s16", + "vmlal_high_laneq_s16", + "vmlal_high_lane_s32", + "vmlal_high_laneq_s32", + "vmlal_high_lane_u16", + "vmlal_high_laneq_u16", + "vmlal_high_lane_u32", + "vmlal_high_laneq_u32", + "vmlal_high_n_s16", + "vmlal_high_n_s32", + "vmlal_high_n_u16", + "vmlal_high_n_u32", + "vmlal_high_s8", + "vmlal_high_s16", + "vmlal_high_s32", + "vmlal_high_u8", + "vmlal_high_u16", + "vmlal_high_u32", + "vmls_f64", + "vmlsq_f64", + "vmlsl_high_lane_s16", + "vmlsl_high_laneq_s16", + "vmlsl_high_lane_s32", + "vmlsl_high_laneq_s32", + "vmlsl_high_lane_u16", + "vmlsl_high_laneq_u16", + "vmlsl_high_lane_u32", + "vmlsl_high_laneq_u32", + "vmlsl_high_n_s16", + "vmlsl_high_n_s32", + "vmlsl_high_n_u16", + "vmlsl_high_n_u32", + "vmlsl_high_s8", + "vmlsl_high_s16", + "vmlsl_high_s32", + "vmlsl_high_u8", + "vmlsl_high_u16", + "vmlsl_high_u32", + "vmovl_high_s8", + "vmovl_high_s16", + 
"vmovl_high_s32", + "vmovl_high_u8", + "vmovl_high_u16", + "vmovl_high_u32", + "vmovn_high_s16", + "vmovn_high_s32", + "vmovn_high_s64", + "vmovn_high_u16", + "vmovn_high_u32", + "vmovn_high_u64", + "vmul_f64", + "vmulq_f64", + "vmul_lane_f64", + "vmul_laneq_f16", + "vmulq_laneq_f16", + "vmul_laneq_f64", + "vmul_n_f64", + "vmulq_n_f64", + "vmuld_lane_f64", + "vmulh_f16", + "vmulh_lane_f16", + "vmulh_laneq_f16", + "vmull_high_lane_s16", + "vmull_high_laneq_s16", + "vmull_high_lane_s32", + "vmull_high_laneq_s32", + "vmull_high_lane_u16", + "vmull_high_laneq_u16", + "vmull_high_lane_u32", + "vmull_high_laneq_u32", + "vmull_high_n_s16", + "vmull_high_n_s32", + "vmull_high_n_u16", + "vmull_high_n_u32", + "vmull_high_p64", + "vmull_high_p8", + "vmull_high_s8", + "vmull_high_s16", + "vmull_high_s32", + "vmull_high_u8", + "vmull_high_u16", + "vmull_high_u32", + "vmull_p64", + "vmulq_lane_f64", + "vmulq_laneq_f64", + "vmuls_lane_f32", + "vmuls_laneq_f32", + "vmuld_laneq_f64", + "vmulx_f16", + "vmulxq_f16", + "vmulx_f32", + "vmulxq_f32", + "vmulx_f64", + "vmulxq_f64", + "vmulx_lane_f16", + "vmulx_laneq_f16", + "vmulxq_lane_f16", + "vmulxq_laneq_f16", + "vmulx_lane_f32", + "vmulx_laneq_f32", + "vmulxq_lane_f32", + "vmulxq_laneq_f32", + "vmulxq_laneq_f64", + "vmulx_lane_f64", + "vmulx_laneq_f64", + "vmulx_n_f16", + "vmulxq_n_f16", + "vmulxd_f64", + "vmulxs_f32", + "vmulxd_lane_f64", + "vmulxd_laneq_f64", + "vmulxs_lane_f32", + "vmulxs_laneq_f32", + "vmulxh_f16", + "vmulxh_lane_f16", + "vmulxh_laneq_f16", + "vmulxq_lane_f64", + "vneg_f64", + "vnegq_f64", + "vneg_s64", + "vnegq_s64", + "vnegd_s64", + "vnegh_f16", + "vpaddd_f64", + "vpadds_f32", + "vpaddd_s64", + "vpaddd_u64", + "vpaddq_f16", + "vpaddq_f32", + "vpaddq_f64", + "vpaddq_s8", + "vpaddq_s16", + "vpaddq_s32", + "vpaddq_s64", + "vpaddq_u8", + "vpaddq_u16", + "vpaddq_u32", + "vpaddq_u64", + "vpmax_f16", + "vpmaxq_f16", + "vpmaxnm_f16", + "vpmaxnmq_f16", + "vpmaxnm_f32", + "vpmaxnmq_f32", + "vpmaxnmq_f64", + 
"vpmaxnmqd_f64", + "vpmaxnms_f32", + "vpmaxq_s8", + "vpmaxq_s16", + "vpmaxq_s32", + "vpmaxq_u8", + "vpmaxq_u16", + "vpmaxq_u32", + "vpmaxqd_f64", + "vpmaxs_f32", + "vpmin_f16", + "vpminq_f16", + "vpminnm_f16", + "vpminnmq_f16", + "vpminnm_f32", + "vpminnmq_f32", + "vpminnmq_f64", + "vpminnmqd_f64", + "vpminnms_f32", + "vpminq_s8", + "vpminq_s16", + "vpminq_s32", + "vpminq_u8", + "vpminq_u16", + "vpminq_u32", + "vpminqd_f64", + "vpmins_f32", + "vqabs_s64", + "vqabsq_s64", + "vqabsb_s8", + "vqabsh_s16", + "vqabss_s32", + "vqabsd_s64", + "vqaddb_s8", + "vqaddh_s16", + "vqaddb_u8", + "vqaddh_u16", + "vqadds_s32", + "vqaddd_s64", + "vqadds_u32", + "vqaddd_u64", + "vqdmlal_high_lane_s16", + "vqdmlal_high_laneq_s16", + "vqdmlal_high_lane_s32", + "vqdmlal_high_laneq_s32", + "vqdmlal_high_n_s16", + "vqdmlal_high_s16", + "vqdmlal_high_n_s32", + "vqdmlal_high_s32", + "vqdmlal_laneq_s16", + "vqdmlal_laneq_s32", + "vqdmlalh_lane_s16", + "vqdmlalh_laneq_s16", + "vqdmlals_lane_s32", + "vqdmlals_laneq_s32", + "vqdmlalh_s16", + "vqdmlals_s32", + "vqdmlsl_high_lane_s16", + "vqdmlsl_high_laneq_s16", + "vqdmlsl_high_lane_s32", + "vqdmlsl_high_laneq_s32", + "vqdmlsl_high_n_s16", + "vqdmlsl_high_s16", + "vqdmlsl_high_n_s32", + "vqdmlsl_high_s32", + "vqdmlsl_laneq_s16", + "vqdmlsl_laneq_s32", + "vqdmlslh_lane_s16", + "vqdmlslh_laneq_s16", + "vqdmlsls_lane_s32", + "vqdmlsls_laneq_s32", + "vqdmlslh_s16", + "vqdmlsls_s32", + "vqdmulh_lane_s16", + "vqdmulhq_lane_s16", + "vqdmulh_lane_s32", + "vqdmulhq_lane_s32", + "vqdmulhh_lane_s16", + "vqdmulhh_laneq_s16", + "vqdmulhh_s16", + "vqdmulhs_s32", + "vqdmulhs_lane_s32", + "vqdmulhs_laneq_s32", + "vqdmull_high_lane_s16", + "vqdmull_high_laneq_s32", + "vqdmull_high_lane_s32", + "vqdmull_high_laneq_s16", + "vqdmull_high_n_s16", + "vqdmull_high_n_s32", + "vqdmull_high_s16", + "vqdmull_high_s32", + "vqdmull_laneq_s16", + "vqdmull_laneq_s32", + "vqdmullh_lane_s16", + "vqdmulls_laneq_s32", + "vqdmullh_laneq_s16", + "vqdmullh_s16", + 
"vqdmulls_lane_s32", + "vqdmulls_s32", + "vqmovn_high_s16", + "vqmovn_high_s32", + "vqmovn_high_s64", + "vqmovn_high_u16", + "vqmovn_high_u32", + "vqmovn_high_u64", + "vqmovnd_s64", + "vqmovnd_u64", + "vqmovnh_s16", + "vqmovns_s32", + "vqmovnh_u16", + "vqmovns_u32", + "vqmovun_high_s16", + "vqmovun_high_s32", + "vqmovun_high_s64", + "vqmovunh_s16", + "vqmovuns_s32", + "vqmovund_s64", + "vqneg_s64", + "vqnegq_s64", + "vqnegb_s8", + "vqnegh_s16", + "vqnegs_s32", + "vqnegd_s64", + "vqrdmlah_lane_s16", + "vqrdmlah_lane_s32", + "vqrdmlah_laneq_s16", + "vqrdmlah_laneq_s32", + "vqrdmlahq_lane_s16", + "vqrdmlahq_lane_s32", + "vqrdmlahq_laneq_s16", + "vqrdmlahq_laneq_s32", + "vqrdmlah_s16", + "vqrdmlahq_s16", + "vqrdmlah_s32", + "vqrdmlahq_s32", + "vqrdmlahh_lane_s16", + "vqrdmlahh_laneq_s16", + "vqrdmlahs_lane_s32", + "vqrdmlahs_laneq_s32", + "vqrdmlahh_s16", + "vqrdmlahs_s32", + "vqrdmlsh_lane_s16", + "vqrdmlsh_lane_s32", + "vqrdmlsh_laneq_s16", + "vqrdmlsh_laneq_s32", + "vqrdmlshq_lane_s16", + "vqrdmlshq_lane_s32", + "vqrdmlshq_laneq_s16", + "vqrdmlshq_laneq_s32", + "vqrdmlsh_s16", + "vqrdmlshq_s16", + "vqrdmlsh_s32", + "vqrdmlshq_s32", + "vqrdmlshh_lane_s16", + "vqrdmlshh_laneq_s16", + "vqrdmlshs_lane_s32", + "vqrdmlshs_laneq_s32", + "vqrdmlshh_s16", + "vqrdmlshs_s32", + "vqrdmulhh_lane_s16", + "vqrdmulhh_laneq_s16", + "vqrdmulhs_lane_s32", + "vqrdmulhs_laneq_s32", + "vqrdmulhh_s16", + "vqrdmulhs_s32", + "vqrshlb_s8", + "vqrshlh_s16", + "vqrshlb_u8", + "vqrshlh_u16", + "vqrshld_s64", + "vqrshls_s32", + "vqrshls_u32", + "vqrshld_u64", + "vqrshrn_high_n_s16", + "vqrshrn_high_n_s32", + "vqrshrn_high_n_s64", + "vqrshrn_high_n_u16", + "vqrshrn_high_n_u32", + "vqrshrn_high_n_u64", + "vqrshrnd_n_u64", + "vqrshrnh_n_u16", + "vqrshrns_n_u32", + "vqrshrnh_n_s16", + "vqrshrns_n_s32", + "vqrshrnd_n_s64", + "vqrshrun_high_n_s16", + "vqrshrun_high_n_s32", + "vqrshrun_high_n_s64", + "vqrshrund_n_s64", + "vqrshrunh_n_s16", + "vqrshruns_n_s32", + "vqshlb_n_s8", + "vqshld_n_s64", + 
"vqshlh_n_s16", + "vqshls_n_s32", + "vqshlb_n_u8", + "vqshld_n_u64", + "vqshlh_n_u16", + "vqshls_n_u32", + "vqshlb_s8", + "vqshlh_s16", + "vqshls_s32", + "vqshlb_u8", + "vqshlh_u16", + "vqshls_u32", + "vqshld_s64", + "vqshld_u64", + "vqshlub_n_s8", + "vqshlud_n_s64", + "vqshluh_n_s16", + "vqshlus_n_s32", + "vqshrn_high_n_s16", + "vqshrn_high_n_s32", + "vqshrn_high_n_s64", + "vqshrn_high_n_u16", + "vqshrn_high_n_u32", + "vqshrn_high_n_u64", + "vqshrnd_n_s64", + "vqshrnd_n_u64", + "vqshrnh_n_s16", + "vqshrns_n_s32", + "vqshrnh_n_u16", + "vqshrns_n_u32", + "vqshrun_high_n_s16", + "vqshrun_high_n_s32", + "vqshrun_high_n_s64", + "vqshrund_n_s64", + "vqshrunh_n_s16", + "vqshruns_n_s32", + "vqsubb_s8", + "vqsubh_s16", + "vqsubb_u8", + "vqsubh_u16", + "vqsubs_s32", + "vqsubd_s64", + "vqsubs_u32", + "vqsubd_u64", + "vrax1q_u64", + "vrbit_s8", + "vrbitq_s8", + "vrbit_u8", + "vrbit_u8", + "vrbitq_u8", + "vrbitq_u8", + "vrbit_p8", + "vrbit_p8", + "vrbitq_p8", + "vrbitq_p8", + "vrecpe_f64", + "vrecpeq_f64", + "vrecped_f64", + "vrecpes_f32", + "vrecpeh_f16", + "vrecps_f64", + "vrecpsq_f64", + "vrecpsd_f64", + "vrecpss_f32", + "vrecpsh_f16", + "vrecpxd_f64", + "vrecpxs_f32", + "vrecpxh_f16", + "vreinterpret_f64_f16", + "vreinterpret_f64_f16", + "vreinterpretq_f64_f16", + "vreinterpretq_f64_f16", + "vreinterpret_f16_f64", + "vreinterpret_f16_f64", + "vreinterpretq_f16_f64", + "vreinterpretq_f16_f64", + "vreinterpretq_f64_p128", + "vreinterpretq_f64_p128", + "vreinterpret_f64_f32", + "vreinterpret_f64_f32", + "vreinterpret_p64_f32", + "vreinterpret_p64_f32", + "vreinterpretq_f64_f32", + "vreinterpretq_f64_f32", + "vreinterpretq_p64_f32", + "vreinterpretq_p64_f32", + "vreinterpret_f32_f64", + "vreinterpret_f32_f64", + "vreinterpret_s8_f64", + "vreinterpret_s8_f64", + "vreinterpret_s16_f64", + "vreinterpret_s16_f64", + "vreinterpret_s32_f64", + "vreinterpret_s32_f64", + "vreinterpret_s64_f64", + "vreinterpret_u8_f64", + "vreinterpret_u8_f64", + "vreinterpret_u16_f64", + 
"vreinterpret_u16_f64", + "vreinterpret_u32_f64", + "vreinterpret_u32_f64", + "vreinterpret_u64_f64", + "vreinterpret_p8_f64", + "vreinterpret_p8_f64", + "vreinterpret_p16_f64", + "vreinterpret_p16_f64", + "vreinterpret_p64_f64", + "vreinterpretq_p128_f64", + "vreinterpretq_p128_f64", + "vreinterpretq_f32_f64", + "vreinterpretq_f32_f64", + "vreinterpretq_s8_f64", + "vreinterpretq_s8_f64", + "vreinterpretq_s16_f64", + "vreinterpretq_s16_f64", + "vreinterpretq_s32_f64", + "vreinterpretq_s32_f64", + "vreinterpretq_s64_f64", + "vreinterpretq_s64_f64", + "vreinterpretq_u8_f64", + "vreinterpretq_u8_f64", + "vreinterpretq_u16_f64", + "vreinterpretq_u16_f64", + "vreinterpretq_u32_f64", + "vreinterpretq_u32_f64", + "vreinterpretq_u64_f64", + "vreinterpretq_u64_f64", + "vreinterpretq_p8_f64", + "vreinterpretq_p8_f64", + "vreinterpretq_p16_f64", + "vreinterpretq_p16_f64", + "vreinterpretq_p64_f64", + "vreinterpretq_p64_f64", + "vreinterpret_f64_s8", + "vreinterpret_f64_s8", + "vreinterpretq_f64_s8", + "vreinterpretq_f64_s8", + "vreinterpret_f64_s16", + "vreinterpret_f64_s16", + "vreinterpretq_f64_s16", + "vreinterpretq_f64_s16", + "vreinterpret_f64_s32", + "vreinterpret_f64_s32", + "vreinterpretq_f64_s32", + "vreinterpretq_f64_s32", + "vreinterpret_f64_s64", + "vreinterpret_p64_s64", + "vreinterpretq_f64_s64", + "vreinterpretq_f64_s64", + "vreinterpretq_p64_s64", + "vreinterpretq_p64_s64", + "vreinterpret_f64_u8", + "vreinterpret_f64_u8", + "vreinterpretq_f64_u8", + "vreinterpretq_f64_u8", + "vreinterpret_f64_u16", + "vreinterpret_f64_u16", + "vreinterpretq_f64_u16", + "vreinterpretq_f64_u16", + "vreinterpret_f64_u32", + "vreinterpret_f64_u32", + "vreinterpretq_f64_u32", + "vreinterpretq_f64_u32", + "vreinterpret_f64_u64", + "vreinterpret_p64_u64", + "vreinterpretq_f64_u64", + "vreinterpretq_f64_u64", + "vreinterpretq_p64_u64", + "vreinterpretq_p64_u64", + "vreinterpret_f64_p8", + "vreinterpret_f64_p8", + "vreinterpretq_f64_p8", + "vreinterpretq_f64_p8", + 
"vreinterpret_f64_p16", + "vreinterpret_f64_p16", + "vreinterpretq_f64_p16", + "vreinterpretq_f64_p16", + "vreinterpret_f32_p64", + "vreinterpret_f32_p64", + "vreinterpret_f64_p64", + "vreinterpret_s64_p64", + "vreinterpret_u64_p64", + "vreinterpretq_f32_p64", + "vreinterpretq_f32_p64", + "vreinterpretq_f64_p64", + "vreinterpretq_f64_p64", + "vreinterpretq_s64_p64", + "vreinterpretq_s64_p64", + "vreinterpretq_u64_p64", + "vreinterpretq_u64_p64", + "vrnd32x_f32", + "vrnd32xq_f32", + "vrnd32xq_f64", + "vrnd32x_f64", + "vrnd32z_f32", + "vrnd32zq_f32", + "vrnd32zq_f64", + "vrnd32z_f64", + "vrnd64x_f32", + "vrnd64xq_f32", + "vrnd64xq_f64", + "vrnd64x_f64", + "vrnd64z_f32", + "vrnd64zq_f32", + "vrnd64zq_f64", + "vrnd64z_f64", + "vrnd_f16", + "vrndq_f16", + "vrnd_f32", + "vrndq_f32", + "vrnd_f64", + "vrndq_f64", + "vrnda_f16", + "vrndaq_f16", + "vrnda_f32", + "vrndaq_f32", + "vrnda_f64", + "vrndaq_f64", + "vrndah_f16", + "vrndh_f16", + "vrndi_f16", + "vrndiq_f16", + "vrndi_f32", + "vrndiq_f32", + "vrndi_f64", + "vrndiq_f64", + "vrndih_f16", + "vrndm_f16", + "vrndmq_f16", + "vrndm_f32", + "vrndmq_f32", + "vrndm_f64", + "vrndmq_f64", + "vrndmh_f16", + "vrndn_f64", + "vrndnq_f64", + "vrndnh_f16", + "vrndns_f32", + "vrndp_f16", + "vrndpq_f16", + "vrndp_f32", + "vrndpq_f32", + "vrndp_f64", + "vrndpq_f64", + "vrndph_f16", + "vrndx_f16", + "vrndxq_f16", + "vrndx_f32", + "vrndxq_f32", + "vrndx_f64", + "vrndxq_f64", + "vrndxh_f16", + "vrshld_s64", + "vrshld_u64", + "vrshrd_n_s64", + "vrshrd_n_u64", + "vrshrn_high_n_s16", + "vrshrn_high_n_s32", + "vrshrn_high_n_s64", + "vrshrn_high_n_u16", + "vrshrn_high_n_u32", + "vrshrn_high_n_u64", + "vrsqrte_f64", + "vrsqrteq_f64", + "vrsqrted_f64", + "vrsqrtes_f32", + "vrsqrteh_f16", + "vrsqrts_f64", + "vrsqrtsq_f64", + "vrsqrtsd_f64", + "vrsqrtss_f32", + "vrsqrtsh_f16", + "vrsrad_n_s64", + "vrsrad_n_u64", + "vrsubhn_high_s16", + "vrsubhn_high_s32", + "vrsubhn_high_s64", + "vrsubhn_high_u16", + "vrsubhn_high_u32", + "vrsubhn_high_u64", + 
"vrsubhn_high_s16", + "vrsubhn_high_s32", + "vrsubhn_high_s64", + "vrsubhn_high_u16", + "vrsubhn_high_u32", + "vrsubhn_high_u64", + "vscale_f16", + "vscaleq_f16", + "vscale_f32", + "vscaleq_f32", + "vscaleq_f64", + "vset_lane_f64", + "vsetq_lane_f64", + "vsha512h2q_u64", + "vsha512hq_u64", + "vsha512su0q_u64", + "vsha512su1q_u64", + "vshld_s64", + "vshld_u64", + "vshll_high_n_s8", + "vshll_high_n_s16", + "vshll_high_n_s32", + "vshll_high_n_u8", + "vshll_high_n_u16", + "vshll_high_n_u32", + "vshrn_high_n_s16", + "vshrn_high_n_s32", + "vshrn_high_n_s64", + "vshrn_high_n_u16", + "vshrn_high_n_u32", + "vshrn_high_n_u64", + "vslid_n_s64", + "vslid_n_u64", + "vsm3partw1q_u32", + "vsm3partw2q_u32", + "vsm3ss1q_u32", + "vsm3tt1aq_u32", + "vsm3tt1bq_u32", + "vsm3tt2aq_u32", + "vsm3tt2bq_u32", + "vsm4ekeyq_u32", + "vsm4eq_u32", + "vsqadd_u8", + "vsqaddq_u8", + "vsqadd_u16", + "vsqaddq_u16", + "vsqadd_u32", + "vsqaddq_u32", + "vsqadd_u64", + "vsqaddq_u64", + "vsqaddb_u8", + "vsqaddh_u16", + "vsqaddd_u64", + "vsqadds_u32", + "vsqrt_f16", + "vsqrtq_f16", + "vsqrt_f32", + "vsqrtq_f32", + "vsqrt_f64", + "vsqrtq_f64", + "vsqrth_f16", + "vsrid_n_s64", + "vsrid_n_u64", + "vst1_f16", + "vst1q_f16", + "vst1_f64_x2", + "vst1q_f64_x2", + "vst1_f64_x3", + "vst1q_f64_x3", + "vst1_f64_x4", + "vst1q_f64_x4", + "vst1_lane_f64", + "vst1q_lane_f64", + "vst2_f64", + "vst2_lane_f64", + "vst2_lane_s64", + "vst2_lane_p64", + "vst2_lane_u64", + "vst2q_f64", + "vst2q_s64", + "vst2q_lane_f64", + "vst2q_lane_s8", + "vst2q_lane_s64", + "vst2q_lane_p64", + "vst2q_lane_u8", + "vst2q_lane_u64", + "vst2q_lane_p8", + "vst2q_p64", + "vst2q_u64", + "vst3_f64", + "vst3_lane_f64", + "vst3_lane_s64", + "vst3_lane_p64", + "vst3_lane_u64", + "vst3q_f64", + "vst3q_s64", + "vst3q_lane_f64", + "vst3q_lane_s8", + "vst3q_lane_s64", + "vst3q_lane_p64", + "vst3q_lane_u8", + "vst3q_lane_u64", + "vst3q_lane_p8", + "vst3q_p64", + "vst3q_u64", + "vst4_f64", + "vst4_lane_f64", + "vst4_lane_s64", + "vst4_lane_p64", + 
"vst4_lane_u64", + "vst4q_f64", + "vst4q_s64", + "vst4q_lane_f64", + "vst4q_lane_s8", + "vst4q_lane_s64", + "vst4q_lane_p64", + "vst4q_lane_u8", + "vst4q_lane_u64", + "vst4q_lane_p8", + "vst4q_p64", + "vst4q_u64", + "vstl1_lane_f64", + "vstl1q_lane_f64", + "vstl1_lane_u64", + "vstl1q_lane_u64", + "vstl1_lane_p64", + "vstl1q_lane_p64", + "vstl1_lane_s64", + "vstl1q_lane_s64", + "vsub_f64", + "vsubq_f64", + "vsubd_s64", + "vsubd_u64", + "vsubh_f16", + "vsubl_high_s8", + "vsubl_high_s16", + "vsubl_high_s32", + "vsubl_high_u8", + "vsubl_high_u16", + "vsubl_high_u32", + "vsubw_high_s8", + "vsubw_high_s16", + "vsubw_high_s32", + "vsubw_high_u8", + "vsubw_high_u16", + "vsubw_high_u32", + "vtrn1_f16", + "vtrn1q_f16", + "vtrn1_f32", + "vtrn1q_f64", + "vtrn1_s32", + "vtrn1q_s64", + "vtrn1_u32", + "vtrn1q_u64", + "vtrn1q_p64", + "vtrn1q_f32", + "vtrn1_s8", + "vtrn1q_s8", + "vtrn1_s16", + "vtrn1q_s16", + "vtrn1q_s32", + "vtrn1_u8", + "vtrn1q_u8", + "vtrn1_u16", + "vtrn1q_u16", + "vtrn1q_u32", + "vtrn1_p8", + "vtrn1q_p8", + "vtrn1_p16", + "vtrn1q_p16", + "vtrn2_f16", + "vtrn2q_f16", + "vtrn2_f32", + "vtrn2q_f64", + "vtrn2_s32", + "vtrn2q_s64", + "vtrn2_u32", + "vtrn2q_u64", + "vtrn2q_p64", + "vtrn2q_f32", + "vtrn2_s8", + "vtrn2q_s8", + "vtrn2_s16", + "vtrn2q_s16", + "vtrn2q_s32", + "vtrn2_u8", + "vtrn2q_u8", + "vtrn2_u16", + "vtrn2q_u16", + "vtrn2q_u32", + "vtrn2_p8", + "vtrn2q_p8", + "vtrn2_p16", + "vtrn2q_p16", + "vtst_s64", + "vtstq_s64", + "vtst_p64", + "vtstq_p64", + "vtst_u64", + "vtstq_u64", + "vtstd_s64", + "vtstd_u64", + "vuqadd_s8", + "vuqaddq_s8", + "vuqadd_s16", + "vuqaddq_s16", + "vuqadd_s32", + "vuqaddq_s32", + "vuqadd_s64", + "vuqaddq_s64", + "vuqaddb_s8", + "vuqaddh_s16", + "vuqaddd_s64", + "vuqadds_s32", + "vuzp1_f16", + "vuzp1q_f16", + "vuzp1_f32", + "vuzp1q_f64", + "vuzp1_s32", + "vuzp1q_s64", + "vuzp1_u32", + "vuzp1q_u64", + "vuzp1q_p64", + "vuzp1q_f32", + "vuzp1_s8", + "vuzp1q_s8", + "vuzp1_s16", + "vuzp1q_s16", + "vuzp1q_s32", + "vuzp1_u8", + "vuzp1q_u8", 
+ "vuzp1_u16", + "vuzp1q_u16", + "vuzp1q_u32", + "vuzp1_p8", + "vuzp1q_p8", + "vuzp1_p16", + "vuzp1q_p16", + "vuzp2_f16", + "vuzp2q_f16", + "vuzp2_f32", + "vuzp2q_f64", + "vuzp2_s32", + "vuzp2q_s64", + "vuzp2_u32", + "vuzp2q_u64", + "vuzp2q_p64", + "vuzp2q_f32", + "vuzp2_s8", + "vuzp2q_s8", + "vuzp2_s16", + "vuzp2q_s16", + "vuzp2q_s32", + "vuzp2_u8", + "vuzp2q_u8", + "vuzp2_u16", + "vuzp2q_u16", + "vuzp2q_u32", + "vuzp2_p8", + "vuzp2q_p8", + "vuzp2_p16", + "vuzp2q_p16", + "vxarq_u64", + "vzip1_f16", + "vzip1q_f16", + "vzip1_f32", + "vzip1q_f32", + "vzip1q_f64", + "vzip1_s8", + "vzip1q_s8", + "vzip1_s16", + "vzip1q_s16", + "vzip1_s32", + "vzip1q_s32", + "vzip1q_s64", + "vzip1_u8", + "vzip1q_u8", + "vzip1_u16", + "vzip1q_u16", + "vzip1_u32", + "vzip1q_u32", + "vzip1q_u64", + "vzip1_p8", + "vzip1q_p8", + "vzip1_p16", + "vzip1q_p16", + "vzip1q_p64", + "vzip2_f16", + "vzip2q_f16", + "vzip2_f32", + "vzip2q_f32", + "vzip2q_f64", + "vzip2_s8", + "vzip2q_s8", + "vzip2_s16", + "vzip2q_s16", + "vzip2_s32", + "vzip2q_s32", + "vzip2q_s64", + "vzip2_u8", + "vzip2q_u8", + "vzip2_u16", + "vzip2q_u16", + "vzip2_u32", + "vzip2q_u32", + "vzip2q_u64", + "vzip2_p8", + "vzip2q_p8", + "vzip2_p16", + "vzip2q_p16", + "vzip2q_p64", + "__crc32b", + "__crc32cb", + "__crc32cd", + "__crc32ch", + "__crc32cw", + "__crc32d", + "__crc32h", + "__crc32w", + "vabal_s8", + "vabal_s16", + "vabal_s32", + "vabal_u8", + "vabal_u16", + "vabal_u32", + "vabd_f16", + "vabdq_f16", + "vabd_f32", + "vabdq_f32", + "vabd_s8", + "vabdq_s8", + "vabd_s16", + "vabdq_s16", + "vabd_s32", + "vabdq_s32", + "vabd_u8", + "vabdq_u8", + "vabd_u16", + "vabdq_u16", + "vabd_u32", + "vabdq_u32", + "vabdl_s8", + "vabdl_s16", + "vabdl_s32", + "vabdl_u8", + "vabdl_u16", + "vabdl_u32", + "vabs_f16", + "vabsq_f16", + "vabs_f32", + "vabsq_f32", + "vabs_s8", + "vabsq_s8", + "vabs_s16", + "vabsq_s16", + "vabs_s32", + "vabsq_s32", + "vabsh_f16", + "vadd_f16", + "vaddq_f16", + "vadd_p8", + "vaddq_p8", + "vadd_p16", + "vaddq_p16", + 
"vadd_p64", + "vaddq_p64", + "vaddh_f16", + "vaddhn_high_s16", + "vaddhn_high_s32", + "vaddhn_high_s64", + "vaddhn_high_u16", + "vaddhn_high_u32", + "vaddhn_high_u64", + "vaddhn_s16", + "vaddhn_s32", + "vaddhn_s64", + "vaddhn_u16", + "vaddhn_u32", + "vaddhn_u64", + "vaddq_p128", + "vaesdq_u8", + "vaeseq_u8", + "vaesimcq_u8", + "vaesmcq_u8", + "vbsl_f16", + "vbslq_f16", + "vcage_f16", + "vcageq_f16", + "vcage_f32", + "vcageq_f32", + "vcagt_f16", + "vcagtq_f16", + "vcagt_f32", + "vcagtq_f32", + "vcale_f16", + "vcaleq_f16", + "vcale_f32", + "vcaleq_f32", + "vcalt_f16", + "vcaltq_f16", + "vcalt_f32", + "vcaltq_f32", + "vceq_f16", + "vceqq_f16", + "vceq_p8", + "vceqq_p8", + "vcge_f16", + "vcgeq_f16", + "vcgez_f16", + "vcgezq_f16", + "vcgt_f16", + "vcgtq_f16", + "vcgtz_f16", + "vcgtzq_f16", + "vcle_f16", + "vcleq_f16", + "vclez_f16", + "vclezq_f16", + "vcls_s8", + "vclsq_s8", + "vcls_s16", + "vclsq_s16", + "vcls_s32", + "vclsq_s32", + "vcls_u8", + "vclsq_u8", + "vcls_u16", + "vclsq_u16", + "vcls_u32", + "vclsq_u32", + "vclt_f16", + "vcltq_f16", + "vcltz_f16", + "vcltzq_f16", + "vclz_s8", + "vclzq_s8", + "vclz_s16", + "vclzq_s16", + "vclz_s32", + "vclzq_s32", + "vclz_u16", + "vclz_u16", + "vclzq_u16", + "vclzq_u16", + "vclz_u32", + "vclz_u32", + "vclzq_u32", + "vclzq_u32", + "vclz_u8", + "vclz_u8", + "vclzq_u8", + "vclzq_u8", + "vcnt_s8", + "vcntq_s8", + "vcnt_u8", + "vcnt_u8", + "vcntq_u8", + "vcntq_u8", + "vcnt_p8", + "vcnt_p8", + "vcntq_p8", + "vcntq_p8", + "vcombine_f16", + "vcreate_f16", + "vcreate_f16", + "vcreate_f32", + "vcreate_f32", + "vcreate_s8", + "vcreate_s8", + "vcreate_s16", + "vcreate_s16", + "vcreate_s32", + "vcreate_s32", + "vcreate_s64", + "vcreate_u8", + "vcreate_u8", + "vcreate_u16", + "vcreate_u16", + "vcreate_u32", + "vcreate_u32", + "vcreate_u64", + "vcreate_p8", + "vcreate_p8", + "vcreate_p16", + "vcreate_p16", + "vcreate_p64", + "vcvt_f16_f32", + "vcvt_f16_s16", + "vcvtq_f16_s16", + "vcvt_f16_u16", + "vcvtq_f16_u16", + "vcvt_f32_f16", + 
"vcvt_f32_s32", + "vcvtq_f32_s32", + "vcvt_f32_u32", + "vcvtq_f32_u32", + "vcvt_n_f16_s16", + "vcvtq_n_f16_s16", + "vcvt_n_f16_u16", + "vcvtq_n_f16_u16", + "vcvt_n_f32_s32", + "vcvtq_n_f32_s32", + "vcvt_n_f32_s32", + "vcvtq_n_f32_s32", + "vcvt_n_f32_u32", + "vcvtq_n_f32_u32", + "vcvt_n_f32_u32", + "vcvtq_n_f32_u32", + "vcvt_n_s16_f16", + "vcvtq_n_s16_f16", + "vcvt_n_s32_f32", + "vcvtq_n_s32_f32", + "vcvt_n_s32_f32", + "vcvtq_n_s32_f32", + "vcvt_n_u16_f16", + "vcvtq_n_u16_f16", + "vcvt_n_u32_f32", + "vcvtq_n_u32_f32", + "vcvt_n_u32_f32", + "vcvtq_n_u32_f32", + "vcvt_s16_f16", + "vcvtq_s16_f16", + "vcvt_s32_f32", + "vcvtq_s32_f32", + "vcvt_u16_f16", + "vcvtq_u16_f16", + "vcvt_u32_f32", + "vcvtq_u32_f32", + "vdot_lane_s32", + "vdot_lane_s32", + "vdotq_lane_s32", + "vdotq_lane_s32", + "vdot_lane_u32", + "vdot_lane_u32", + "vdotq_lane_u32", + "vdotq_lane_u32", + "vdot_laneq_s32", + "vdot_laneq_s32", + "vdotq_laneq_s32", + "vdotq_laneq_s32", + "vdot_laneq_u32", + "vdot_laneq_u32", + "vdotq_laneq_u32", + "vdotq_laneq_u32", + "vdot_s32", + "vdotq_s32", + "vdot_u32", + "vdotq_u32", + "vdup_lane_f16", + "vdupq_lane_f16", + "vdup_lane_f32", + "vdup_lane_s32", + "vdup_lane_u32", + "vdupq_lane_f32", + "vdupq_lane_s32", + "vdupq_lane_u32", + "vdup_lane_p16", + "vdup_lane_s16", + "vdup_lane_u16", + "vdupq_lane_p16", + "vdupq_lane_s16", + "vdupq_lane_u16", + "vdup_lane_p8", + "vdup_lane_s8", + "vdup_lane_u8", + "vdupq_lane_p8", + "vdupq_lane_s8", + "vdupq_lane_u8", + "vdup_lane_s64", + "vdup_lane_u64", + "vdup_laneq_f16", + "vdupq_laneq_f16", + "vdup_laneq_f32", + "vdup_laneq_s32", + "vdup_laneq_u32", + "vdupq_laneq_f32", + "vdupq_laneq_s32", + "vdupq_laneq_u32", + "vdup_laneq_p16", + "vdup_laneq_s16", + "vdup_laneq_u16", + "vdupq_laneq_p16", + "vdupq_laneq_s16", + "vdupq_laneq_u16", + "vdup_laneq_p8", + "vdup_laneq_s8", + "vdup_laneq_u8", + "vdupq_laneq_p8", + "vdupq_laneq_s8", + "vdupq_laneq_u8", + "vdup_laneq_s64", + "vdup_laneq_u64", + "vdup_n_f16", + "vdupq_n_f16", + 
"vdupq_lane_s64", + "vdupq_lane_u64", + "vdupq_laneq_s64", + "vdupq_laneq_u64", + "vext_f16", + "vext_f32", + "vext_s32", + "vext_u32", + "vext_s8", + "vextq_s16", + "vext_u8", + "vextq_u16", + "vext_p8", + "vextq_p16", + "vextq_f16", + "vextq_f32", + "vext_s16", + "vextq_s32", + "vext_u16", + "vextq_u32", + "vext_p16", + "vextq_s64", + "vextq_u64", + "vextq_s8", + "vextq_u8", + "vextq_p8", + "vfma_f16", + "vfmaq_f16", + "vfma_f32", + "vfmaq_f32", + "vfma_n_f32", + "vfmaq_n_f32", + "vfms_f16", + "vfmsq_f16", + "vfms_f32", + "vfmsq_f32", + "vfms_n_f32", + "vfmsq_n_f32", + "vget_high_f16", + "vget_low_f16", + "vget_lane_f16", + "vgetq_lane_f16", + "vld1_dup_f16", + "vld1q_dup_f16", + "vld1_f16", + "vld1_f16", + "vld1q_f16", + "vld1q_f16", + "vld1_f16_x2", + "vld1_f16_x3", + "vld1_f16_x4", + "vld1q_f16_x2", + "vld1q_f16_x3", + "vld1q_f16_x4", + "vld1_f32_x2", + "vld1_f32_x3", + "vld1_f32_x4", + "vld1q_f32_x2", + "vld1q_f32_x3", + "vld1q_f32_x4", + "vld1_lane_f16", + "vld1q_lane_f16", + "vld1_p64_x2", + "vld1_p64_x3", + "vld1_p64_x4", + "vld1q_p64_x2", + "vld1q_p64_x3", + "vld1q_p64_x4", + "vld1_s8_x2", + "vld1_s8_x3", + "vld1_s8_x4", + "vld1q_s8_x2", + "vld1q_s8_x3", + "vld1q_s8_x4", + "vld1_s16_x2", + "vld1_s16_x3", + "vld1_s16_x4", + "vld1q_s16_x2", + "vld1q_s16_x3", + "vld1q_s16_x4", + "vld1_s32_x2", + "vld1_s32_x3", + "vld1_s32_x4", + "vld1q_s32_x2", + "vld1q_s32_x3", + "vld1q_s32_x4", + "vld1_s64_x2", + "vld1_s64_x3", + "vld1_s64_x4", + "vld1q_s64_x2", + "vld1q_s64_x3", + "vld1q_s64_x4", + "vld1_u8_x2", + "vld1_u8_x3", + "vld1_u8_x4", + "vld1q_u8_x2", + "vld1q_u8_x3", + "vld1q_u8_x4", + "vld1_u16_x2", + "vld1_u16_x3", + "vld1_u16_x4", + "vld1q_u16_x2", + "vld1q_u16_x3", + "vld1q_u16_x4", + "vld1_u32_x2", + "vld1_u32_x3", + "vld1_u32_x4", + "vld1q_u32_x2", + "vld1q_u32_x3", + "vld1q_u32_x4", + "vld1_u64_x2", + "vld1_u64_x3", + "vld1_u64_x4", + "vld1q_u64_x2", + "vld1q_u64_x3", + "vld1q_u64_x4", + "vld1_p8_x2", + "vld1_p8_x3", + "vld1_p8_x4", + "vld1q_p8_x2", + 
"vld1q_p8_x3", + "vld1q_p8_x4", + "vld1_p16_x2", + "vld1_p16_x3", + "vld1_p16_x4", + "vld1q_p16_x2", + "vld1q_p16_x3", + "vld1q_p16_x4", + "vld2_dup_f16", + "vld2q_dup_f16", + "vld2_dup_f16", + "vld2q_dup_f16", + "vld2_dup_f32", + "vld2q_dup_f32", + "vld2_dup_s8", + "vld2q_dup_s8", + "vld2_dup_s16", + "vld2q_dup_s16", + "vld2_dup_s32", + "vld2q_dup_s32", + "vld2_dup_f32", + "vld2q_dup_f32", + "vld2_dup_s8", + "vld2q_dup_s8", + "vld2_dup_s16", + "vld2q_dup_s16", + "vld2_dup_s32", + "vld2q_dup_s32", + "vld2_dup_p64", + "vld2_dup_s64", + "vld2_dup_s64", + "vld2_dup_u64", + "vld2_dup_u8", + "vld2_dup_u8", + "vld2q_dup_u8", + "vld2q_dup_u8", + "vld2_dup_u16", + "vld2_dup_u16", + "vld2q_dup_u16", + "vld2q_dup_u16", + "vld2_dup_u32", + "vld2_dup_u32", + "vld2q_dup_u32", + "vld2q_dup_u32", + "vld2_dup_p8", + "vld2_dup_p8", + "vld2q_dup_p8", + "vld2q_dup_p8", + "vld2_dup_p16", + "vld2_dup_p16", + "vld2q_dup_p16", + "vld2q_dup_p16", + "vld2_f16", + "vld2q_f16", + "vld2_f16", + "vld2q_f16", + "vld2_f32", + "vld2q_f32", + "vld2_s8", + "vld2q_s8", + "vld2_s16", + "vld2q_s16", + "vld2_s32", + "vld2q_s32", + "vld2_f32", + "vld2q_f32", + "vld2_s8", + "vld2q_s8", + "vld2_s16", + "vld2q_s16", + "vld2_s32", + "vld2q_s32", + "vld2_lane_f16", + "vld2q_lane_f16", + "vld2_lane_f16", + "vld2q_lane_f16", + "vld2_lane_f32", + "vld2q_lane_f32", + "vld2_lane_s8", + "vld2_lane_s16", + "vld2q_lane_s16", + "vld2_lane_s32", + "vld2q_lane_s32", + "vld2_lane_f32", + "vld2q_lane_f32", + "vld2q_lane_s16", + "vld2q_lane_s32", + "vld2_lane_s8", + "vld2_lane_s16", + "vld2_lane_s32", + "vld2_lane_u8", + "vld2_lane_u16", + "vld2q_lane_u16", + "vld2_lane_u32", + "vld2q_lane_u32", + "vld2_lane_p8", + "vld2_lane_p16", + "vld2q_lane_p16", + "vld2_p64", + "vld2_s64", + "vld2_s64", + "vld2_u64", + "vld2_u8", + "vld2q_u8", + "vld2_u16", + "vld2q_u16", + "vld2_u32", + "vld2q_u32", + "vld2_p8", + "vld2q_p8", + "vld2_p16", + "vld2q_p16", + "vld3_dup_f16", + "vld3q_dup_f16", + "vld3_dup_f16", + "vld3q_dup_f16", + 
"vld3_dup_f32", + "vld3q_dup_f32", + "vld3_dup_s8", + "vld3q_dup_s8", + "vld3_dup_s16", + "vld3q_dup_s16", + "vld3_dup_s32", + "vld3q_dup_s32", + "vld3_dup_s64", + "vld3_dup_f32", + "vld3q_dup_f32", + "vld3_dup_s8", + "vld3q_dup_s8", + "vld3_dup_s16", + "vld3q_dup_s16", + "vld3_dup_s32", + "vld3q_dup_s32", + "vld3_dup_p64", + "vld3_dup_s64", + "vld3_dup_u64", + "vld3_dup_u8", + "vld3_dup_u8", + "vld3q_dup_u8", + "vld3q_dup_u8", + "vld3_dup_u16", + "vld3_dup_u16", + "vld3q_dup_u16", + "vld3q_dup_u16", + "vld3_dup_u32", + "vld3_dup_u32", + "vld3q_dup_u32", + "vld3q_dup_u32", + "vld3_dup_p8", + "vld3_dup_p8", + "vld3q_dup_p8", + "vld3q_dup_p8", + "vld3_dup_p16", + "vld3_dup_p16", + "vld3q_dup_p16", + "vld3q_dup_p16", + "vld3_f16", + "vld3q_f16", + "vld3_f16", + "vld3q_f16", + "vld3_f32", + "vld3q_f32", + "vld3_s8", + "vld3q_s8", + "vld3_s16", + "vld3q_s16", + "vld3_s32", + "vld3q_s32", + "vld3_f32", + "vld3q_f32", + "vld3_s8", + "vld3q_s8", + "vld3_s16", + "vld3q_s16", + "vld3_s32", + "vld3q_s32", + "vld3_lane_f16", + "vld3q_lane_f16", + "vld3_lane_f16", + "vld3q_lane_f16", + "vld3_lane_f32", + "vld3q_lane_f32", + "vld3_lane_f32", + "vld3_lane_s8", + "vld3_lane_s16", + "vld3q_lane_s16", + "vld3_lane_s32", + "vld3q_lane_s32", + "vld3_lane_s8", + "vld3_lane_s16", + "vld3q_lane_s16", + "vld3_lane_s32", + "vld3q_lane_s32", + "vld3_lane_u8", + "vld3_lane_u16", + "vld3q_lane_u16", + "vld3_lane_u32", + "vld3q_lane_u32", + "vld3_lane_p8", + "vld3_lane_p16", + "vld3q_lane_p16", + "vld3_p64", + "vld3_s64", + "vld3_s64", + "vld3_u64", + "vld3_u8", + "vld3q_u8", + "vld3_u16", + "vld3q_u16", + "vld3_u32", + "vld3q_u32", + "vld3_p8", + "vld3q_p8", + "vld3_p16", + "vld3q_p16", + "vld3q_lane_f32", + "vld4_dup_f16", + "vld4q_dup_f16", + "vld4_dup_f16", + "vld4q_dup_f16", + "vld4_dup_f32", + "vld4q_dup_f32", + "vld4_dup_s8", + "vld4q_dup_s8", + "vld4_dup_s16", + "vld4q_dup_s16", + "vld4_dup_s32", + "vld4q_dup_s32", + "vld4_dup_f32", + "vld4q_dup_f32", + "vld4_dup_s8", + "vld4q_dup_s8", 
+ "vld4_dup_s16", + "vld4q_dup_s16", + "vld4_dup_s32", + "vld4q_dup_s32", + "vld4_dup_s64", + "vld4_dup_p64", + "vld4_dup_s64", + "vld4_dup_u64", + "vld4_dup_u8", + "vld4_dup_u8", + "vld4q_dup_u8", + "vld4q_dup_u8", + "vld4_dup_u16", + "vld4_dup_u16", + "vld4q_dup_u16", + "vld4q_dup_u16", + "vld4_dup_u32", + "vld4_dup_u32", + "vld4q_dup_u32", + "vld4q_dup_u32", + "vld4_dup_p8", + "vld4_dup_p8", + "vld4q_dup_p8", + "vld4q_dup_p8", + "vld4_dup_p16", + "vld4_dup_p16", + "vld4q_dup_p16", + "vld4q_dup_p16", + "vld4_f16", + "vld4q_f16", + "vld4_f16", + "vld4q_f16", + "vld4_f32", + "vld4q_f32", + "vld4_s8", + "vld4q_s8", + "vld4_s16", + "vld4q_s16", + "vld4_s32", + "vld4q_s32", + "vld4_f32", + "vld4q_f32", + "vld4_s8", + "vld4q_s8", + "vld4_s16", + "vld4q_s16", + "vld4_s32", + "vld4q_s32", + "vld4_lane_f16", + "vld4q_lane_f16", + "vld4_lane_f16", + "vld4q_lane_f16", + "vld4_lane_f32", + "vld4q_lane_f32", + "vld4_lane_s8", + "vld4_lane_s16", + "vld4q_lane_s16", + "vld4_lane_s32", + "vld4q_lane_s32", + "vld4_lane_f32", + "vld4q_lane_f32", + "vld4_lane_s8", + "vld4_lane_s16", + "vld4q_lane_s16", + "vld4_lane_s32", + "vld4q_lane_s32", + "vld4_lane_u8", + "vld4_lane_u16", + "vld4q_lane_u16", + "vld4_lane_u32", + "vld4q_lane_u32", + "vld4_lane_p8", + "vld4_lane_p16", + "vld4q_lane_p16", + "vld4_p64", + "vld4_s64", + "vld4_s64", + "vld4_u64", + "vld4_u8", + "vld4q_u8", + "vld4_u16", + "vld4q_u16", + "vld4_u32", + "vld4q_u32", + "vld4_p8", + "vld4q_p8", + "vld4_p16", + "vld4q_p16", + "vmax_f16", + "vmaxq_f16", + "vmax_f32", + "vmaxq_f32", + "vmax_s8", + "vmaxq_s8", + "vmax_s16", + "vmaxq_s16", + "vmax_s32", + "vmaxq_s32", + "vmax_u8", + "vmaxq_u8", + "vmax_u16", + "vmaxq_u16", + "vmax_u32", + "vmaxq_u32", + "vmaxnm_f16", + "vmaxnmq_f16", + "vmaxnm_f32", + "vmaxnmq_f32", + "vmin_f16", + "vminq_f16", + "vmin_f32", + "vminq_f32", + "vmin_s8", + "vminq_s8", + "vmin_s16", + "vminq_s16", + "vmin_s32", + "vminq_s32", + "vmin_u8", + "vminq_u8", + "vmin_u16", + "vminq_u16", + "vmin_u32", 
+ "vminq_u32", + "vminnm_f16", + "vminnmq_f16", + "vminnm_f32", + "vminnmq_f32", + "vmla_f32", + "vmlaq_f32", + "vmla_lane_f32", + "vmla_laneq_f32", + "vmlaq_lane_f32", + "vmlaq_laneq_f32", + "vmla_lane_s16", + "vmla_lane_u16", + "vmla_laneq_s16", + "vmla_laneq_u16", + "vmlaq_lane_s16", + "vmlaq_lane_u16", + "vmlaq_laneq_s16", + "vmlaq_laneq_u16", + "vmla_lane_s32", + "vmla_lane_u32", + "vmla_laneq_s32", + "vmla_laneq_u32", + "vmlaq_lane_s32", + "vmlaq_lane_u32", + "vmlaq_laneq_s32", + "vmlaq_laneq_u32", + "vmla_n_f32", + "vmlaq_n_f32", + "vmla_n_s16", + "vmlaq_n_s16", + "vmla_n_u16", + "vmlaq_n_u16", + "vmla_n_s32", + "vmlaq_n_s32", + "vmla_n_u32", + "vmlaq_n_u32", + "vmla_s8", + "vmlaq_s8", + "vmla_s16", + "vmlaq_s16", + "vmla_s32", + "vmlaq_s32", + "vmla_u8", + "vmlaq_u8", + "vmla_u16", + "vmlaq_u16", + "vmla_u32", + "vmlaq_u32", + "vmlal_lane_s16", + "vmlal_laneq_s16", + "vmlal_lane_s32", + "vmlal_laneq_s32", + "vmlal_lane_u16", + "vmlal_laneq_u16", + "vmlal_lane_u32", + "vmlal_laneq_u32", + "vmlal_n_s16", + "vmlal_n_s32", + "vmlal_n_u16", + "vmlal_n_u32", + "vmlal_s8", + "vmlal_s16", + "vmlal_s32", + "vmlal_u8", + "vmlal_u16", + "vmlal_u32", + "vmls_f32", + "vmlsq_f32", + "vmls_lane_f32", + "vmls_laneq_f32", + "vmlsq_lane_f32", + "vmlsq_laneq_f32", + "vmls_lane_s16", + "vmls_lane_u16", + "vmls_laneq_s16", + "vmls_laneq_u16", + "vmlsq_lane_s16", + "vmlsq_lane_u16", + "vmlsq_laneq_s16", + "vmlsq_laneq_u16", + "vmls_lane_s32", + "vmls_lane_u32", + "vmls_laneq_s32", + "vmls_laneq_u32", + "vmlsq_lane_s32", + "vmlsq_lane_u32", + "vmlsq_laneq_s32", + "vmlsq_laneq_u32", + "vmls_n_f32", + "vmlsq_n_f32", + "vmls_n_s16", + "vmlsq_n_s16", + "vmls_n_u16", + "vmlsq_n_u16", + "vmls_n_s32", + "vmlsq_n_s32", + "vmls_n_u32", + "vmlsq_n_u32", + "vmls_s8", + "vmlsq_s8", + "vmls_s16", + "vmlsq_s16", + "vmls_s32", + "vmlsq_s32", + "vmls_u8", + "vmlsq_u8", + "vmls_u16", + "vmlsq_u16", + "vmls_u32", + "vmlsq_u32", + "vmlsl_lane_s16", + "vmlsl_laneq_s16", + "vmlsl_lane_s32", + 
"vmlsl_laneq_s32", + "vmlsl_lane_u16", + "vmlsl_laneq_u16", + "vmlsl_lane_u32", + "vmlsl_laneq_u32", + "vmlsl_n_s16", + "vmlsl_n_s32", + "vmlsl_n_u16", + "vmlsl_n_u32", + "vmlsl_s8", + "vmlsl_s16", + "vmlsl_s32", + "vmlsl_u8", + "vmlsl_u16", + "vmlsl_u32", + "vmmlaq_s32", + "vmmlaq_u32", + "vmov_n_f16", + "vmovq_n_f16", + "vmul_f16", + "vmulq_f16", + "vmul_lane_f16", + "vmulq_lane_f16", + "vmul_lane_f32", + "vmul_laneq_f32", + "vmulq_lane_f32", + "vmulq_laneq_f32", + "vmul_lane_s16", + "vmulq_lane_s16", + "vmul_lane_s32", + "vmulq_lane_s32", + "vmul_lane_u16", + "vmulq_lane_u16", + "vmul_lane_u32", + "vmulq_lane_u32", + "vmul_laneq_s16", + "vmulq_laneq_s16", + "vmul_laneq_s32", + "vmulq_laneq_s32", + "vmul_laneq_u16", + "vmulq_laneq_u16", + "vmul_laneq_u32", + "vmulq_laneq_u32", + "vmul_n_f16", + "vmulq_n_f16", + "vmul_n_f32", + "vmulq_n_f32", + "vmul_n_s16", + "vmulq_n_s16", + "vmul_n_s32", + "vmulq_n_s32", + "vmul_n_u16", + "vmulq_n_u16", + "vmul_n_u32", + "vmulq_n_u32", + "vmul_p8", + "vmulq_p8", + "vmull_lane_s16", + "vmull_laneq_s16", + "vmull_lane_s32", + "vmull_laneq_s32", + "vmull_lane_u16", + "vmull_laneq_u16", + "vmull_lane_u32", + "vmull_laneq_u32", + "vmull_n_s16", + "vmull_n_s32", + "vmull_n_u16", + "vmull_n_u32", + "vmull_p8", + "vmull_s16", + "vmull_s32", + "vmull_s8", + "vmull_u8", + "vmull_u16", + "vmull_u32", + "vneg_f16", + "vnegq_f16", + "vneg_f32", + "vnegq_f32", + "vneg_s8", + "vnegq_s8", + "vneg_s16", + "vnegq_s16", + "vneg_s32", + "vnegq_s32", + "vpadal_s8", + "vpadalq_s8", + "vpadal_s16", + "vpadalq_s16", + "vpadal_s32", + "vpadalq_s32", + "vpadal_u8", + "vpadalq_u8", + "vpadal_u16", + "vpadalq_u16", + "vpadal_u32", + "vpadalq_u32", + "vpadd_f16", + "vpadd_f32", + "vpadd_s8", + "vpadd_s16", + "vpadd_s32", + "vpadd_u8", + "vpadd_u8", + "vpadd_u16", + "vpadd_u16", + "vpadd_u32", + "vpadd_u32", + "vpaddl_s8", + "vpaddlq_s8", + "vpaddl_s16", + "vpaddlq_s16", + "vpaddl_s32", + "vpaddlq_s32", + "vpaddl_u8", + "vpaddlq_u8", + "vpaddl_u16", + 
"vpaddlq_u16", + "vpaddl_u32", + "vpaddlq_u32", + "vpmax_f32", + "vpmax_s8", + "vpmax_s16", + "vpmax_s32", + "vpmax_u8", + "vpmax_u16", + "vpmax_u32", + "vpmin_f32", + "vpmin_s8", + "vpmin_s16", + "vpmin_s32", + "vpmin_u8", + "vpmin_u16", + "vpmin_u32", + "vqabs_s8", + "vqabsq_s8", + "vqabs_s16", + "vqabsq_s16", + "vqabs_s32", + "vqabsq_s32", + "vqadd_s64", + "vqaddq_s64", + "vqadd_u64", + "vqaddq_u64", + "vqdmlal_lane_s16", + "vqdmlal_lane_s32", + "vqdmlal_n_s16", + "vqdmlal_n_s32", + "vqdmlal_s16", + "vqdmlal_s32", + "vqdmlsl_lane_s16", + "vqdmlsl_lane_s32", + "vqdmlsl_n_s16", + "vqdmlsl_n_s32", + "vqdmlsl_s16", + "vqdmlsl_s32", + "vqdmulh_laneq_s16", + "vqdmulhq_laneq_s16", + "vqdmulh_laneq_s32", + "vqdmulhq_laneq_s32", + "vqdmulh_n_s16", + "vqdmulhq_n_s16", + "vqdmulh_n_s32", + "vqdmulhq_n_s32", + "vqdmulh_s16", + "vqdmulhq_s16", + "vqdmulh_s32", + "vqdmulhq_s32", + "vqdmull_lane_s16", + "vqdmull_lane_s32", + "vqdmull_n_s16", + "vqdmull_n_s32", + "vqdmull_s16", + "vqdmull_s32", + "vqmovn_s16", + "vqmovn_s32", + "vqmovn_s64", + "vqmovn_u16", + "vqmovn_u32", + "vqmovn_u64", + "vqmovun_s16", + "vqmovun_s32", + "vqmovun_s64", + "vqneg_s8", + "vqnegq_s8", + "vqneg_s16", + "vqnegq_s16", + "vqneg_s32", + "vqnegq_s32", + "vqrdmulh_lane_s16", + "vqrdmulh_lane_s32", + "vqrdmulh_laneq_s16", + "vqrdmulh_laneq_s32", + "vqrdmulhq_lane_s16", + "vqrdmulhq_lane_s32", + "vqrdmulhq_laneq_s16", + "vqrdmulhq_laneq_s32", + "vqrdmulh_n_s16", + "vqrdmulhq_n_s16", + "vqrdmulh_n_s32", + "vqrdmulhq_n_s32", + "vqrdmulh_s16", + "vqrdmulhq_s16", + "vqrdmulh_s32", + "vqrdmulhq_s32", + "vqrshl_s8", + "vqrshlq_s8", + "vqrshl_s16", + "vqrshlq_s16", + "vqrshl_s32", + "vqrshlq_s32", + "vqrshl_s64", + "vqrshlq_s64", + "vqrshl_u8", + "vqrshlq_u8", + "vqrshl_u16", + "vqrshlq_u16", + "vqrshl_u32", + "vqrshlq_u32", + "vqrshl_u64", + "vqrshlq_u64", + "vqrshrn_n_s16", + "vqrshrn_n_s32", + "vqrshrn_n_s64", + "vqrshrn_n_s16", + "vqrshrn_n_s32", + "vqrshrn_n_s64", + "vqrshrn_n_u16", + "vqrshrn_n_u32", + 
"vqrshrn_n_u64", + "vqrshrn_n_u16", + "vqrshrn_n_u32", + "vqrshrn_n_u64", + "vqrshrun_n_s16", + "vqrshrun_n_s32", + "vqrshrun_n_s64", + "vqrshrun_n_s16", + "vqrshrun_n_s32", + "vqrshrun_n_s64", + "vqshl_n_s8", + "vqshlq_n_s8", + "vqshl_n_s16", + "vqshlq_n_s16", + "vqshl_n_s32", + "vqshlq_n_s32", + "vqshl_n_s64", + "vqshlq_n_s64", + "vqshl_n_u8", + "vqshlq_n_u8", + "vqshl_n_u16", + "vqshlq_n_u16", + "vqshl_n_u32", + "vqshlq_n_u32", + "vqshl_n_u64", + "vqshlq_n_u64", + "vqshl_s8", + "vqshlq_s8", + "vqshl_s16", + "vqshlq_s16", + "vqshl_s32", + "vqshlq_s32", + "vqshl_s64", + "vqshlq_s64", + "vqshl_u8", + "vqshlq_u8", + "vqshl_u16", + "vqshlq_u16", + "vqshl_u32", + "vqshlq_u32", + "vqshl_u64", + "vqshlq_u64", + "vqshlu_n_s8", + "vqshluq_n_s8", + "vqshlu_n_s16", + "vqshluq_n_s16", + "vqshlu_n_s32", + "vqshluq_n_s32", + "vqshlu_n_s64", + "vqshluq_n_s64", + "vqshlu_n_s8", + "vqshluq_n_s8", + "vqshlu_n_s16", + "vqshluq_n_s16", + "vqshlu_n_s32", + "vqshluq_n_s32", + "vqshlu_n_s64", + "vqshluq_n_s64", + "vqshrn_n_s16", + "vqshrn_n_s32", + "vqshrn_n_s64", + "vqshrn_n_s16", + "vqshrn_n_s32", + "vqshrn_n_s64", + "vqshrn_n_u16", + "vqshrn_n_u32", + "vqshrn_n_u64", + "vqshrn_n_u16", + "vqshrn_n_u32", + "vqshrn_n_u64", + "vqshrun_n_s16", + "vqshrun_n_s32", + "vqshrun_n_s64", + "vqshrun_n_s16", + "vqshrun_n_s32", + "vqshrun_n_s64", + "vqsub_s64", + "vqsubq_s64", + "vqsub_u64", + "vqsubq_u64", + "vraddhn_high_s16", + "vraddhn_high_s32", + "vraddhn_high_s64", + "vraddhn_high_u16", + "vraddhn_high_u32", + "vraddhn_high_u64", + "vraddhn_s16", + "vraddhn_s32", + "vraddhn_s64", + "vraddhn_u16", + "vraddhn_u16", + "vraddhn_u32", + "vraddhn_u32", + "vraddhn_u64", + "vraddhn_u64", + "vrecpe_f16", + "vrecpeq_f16", + "vrecpe_f32", + "vrecpeq_f32", + "vrecpe_u32", + "vrecpeq_u32", + "vrecps_f16", + "vrecpsq_f16", + "vrecps_f32", + "vrecpsq_f32", + "vreinterpret_f32_f16", + "vreinterpret_f32_f16", + "vreinterpret_s8_f16", + "vreinterpret_s8_f16", + "vreinterpret_s16_f16", + 
"vreinterpret_s16_f16", + "vreinterpret_s32_f16", + "vreinterpret_s32_f16", + "vreinterpret_s64_f16", + "vreinterpret_s64_f16", + "vreinterpret_u8_f16", + "vreinterpret_u8_f16", + "vreinterpret_u16_f16", + "vreinterpret_u16_f16", + "vreinterpret_u32_f16", + "vreinterpret_u32_f16", + "vreinterpret_u64_f16", + "vreinterpret_u64_f16", + "vreinterpret_p8_f16", + "vreinterpret_p8_f16", + "vreinterpret_p16_f16", + "vreinterpret_p16_f16", + "vreinterpretq_f32_f16", + "vreinterpretq_f32_f16", + "vreinterpretq_s8_f16", + "vreinterpretq_s8_f16", + "vreinterpretq_s16_f16", + "vreinterpretq_s16_f16", + "vreinterpretq_s32_f16", + "vreinterpretq_s32_f16", + "vreinterpretq_s64_f16", + "vreinterpretq_s64_f16", + "vreinterpretq_u8_f16", + "vreinterpretq_u8_f16", + "vreinterpretq_u16_f16", + "vreinterpretq_u16_f16", + "vreinterpretq_u32_f16", + "vreinterpretq_u32_f16", + "vreinterpretq_u64_f16", + "vreinterpretq_u64_f16", + "vreinterpretq_p8_f16", + "vreinterpretq_p8_f16", + "vreinterpretq_p16_f16", + "vreinterpretq_p16_f16", + "vreinterpret_f16_f32", + "vreinterpret_f16_f32", + "vreinterpretq_f16_f32", + "vreinterpretq_f16_f32", + "vreinterpret_f16_s8", + "vreinterpret_f16_s8", + "vreinterpretq_f16_s8", + "vreinterpretq_f16_s8", + "vreinterpret_f16_s16", + "vreinterpret_f16_s16", + "vreinterpretq_f16_s16", + "vreinterpretq_f16_s16", + "vreinterpret_f16_s32", + "vreinterpret_f16_s32", + "vreinterpretq_f16_s32", + "vreinterpretq_f16_s32", + "vreinterpret_f16_s64", + "vreinterpret_f16_s64", + "vreinterpretq_f16_s64", + "vreinterpretq_f16_s64", + "vreinterpret_f16_u8", + "vreinterpret_f16_u8", + "vreinterpretq_f16_u8", + "vreinterpretq_f16_u8", + "vreinterpret_f16_u16", + "vreinterpret_f16_u16", + "vreinterpretq_f16_u16", + "vreinterpretq_f16_u16", + "vreinterpret_f16_u32", + "vreinterpret_f16_u32", + "vreinterpretq_f16_u32", + "vreinterpretq_f16_u32", + "vreinterpret_f16_u64", + "vreinterpret_f16_u64", + "vreinterpretq_f16_u64", + "vreinterpretq_f16_u64", + "vreinterpret_f16_p8", + 
"vreinterpret_f16_p8", + "vreinterpretq_f16_p8", + "vreinterpretq_f16_p8", + "vreinterpret_f16_p16", + "vreinterpret_f16_p16", + "vreinterpretq_f16_p16", + "vreinterpretq_f16_p16", + "vreinterpretq_f16_p128", + "vreinterpretq_f16_p128", + "vreinterpret_p64_f16", + "vreinterpret_p64_f16", + "vreinterpretq_p128_f16", + "vreinterpretq_p128_f16", + "vreinterpretq_p64_f16", + "vreinterpretq_p64_f16", + "vreinterpret_f16_p64", + "vreinterpret_f16_p64", + "vreinterpretq_f16_p64", + "vreinterpretq_f16_p64", + "vreinterpretq_f32_p128", + "vreinterpretq_f32_p128", + "vreinterpret_s8_f32", + "vreinterpret_s8_f32", + "vreinterpret_s16_f32", + "vreinterpret_s16_f32", + "vreinterpret_s32_f32", + "vreinterpret_s32_f32", + "vreinterpret_s64_f32", + "vreinterpret_s64_f32", + "vreinterpret_u8_f32", + "vreinterpret_u8_f32", + "vreinterpret_u16_f32", + "vreinterpret_u16_f32", + "vreinterpret_u32_f32", + "vreinterpret_u32_f32", + "vreinterpret_u64_f32", + "vreinterpret_u64_f32", + "vreinterpret_p8_f32", + "vreinterpret_p8_f32", + "vreinterpret_p16_f32", + "vreinterpret_p16_f32", + "vreinterpretq_p128_f32", + "vreinterpretq_p128_f32", + "vreinterpretq_s8_f32", + "vreinterpretq_s8_f32", + "vreinterpretq_s16_f32", + "vreinterpretq_s16_f32", + "vreinterpretq_s32_f32", + "vreinterpretq_s32_f32", + "vreinterpretq_s64_f32", + "vreinterpretq_s64_f32", + "vreinterpretq_u8_f32", + "vreinterpretq_u8_f32", + "vreinterpretq_u16_f32", + "vreinterpretq_u16_f32", + "vreinterpretq_u32_f32", + "vreinterpretq_u32_f32", + "vreinterpretq_u64_f32", + "vreinterpretq_u64_f32", + "vreinterpretq_p8_f32", + "vreinterpretq_p8_f32", + "vreinterpretq_p16_f32", + "vreinterpretq_p16_f32", + "vreinterpret_f32_s8", + "vreinterpret_f32_s8", + "vreinterpret_s16_s8", + "vreinterpret_s16_s8", + "vreinterpret_s32_s8", + "vreinterpret_s32_s8", + "vreinterpret_s64_s8", + "vreinterpret_s64_s8", + "vreinterpret_u8_s8", + "vreinterpret_u8_s8", + "vreinterpret_u16_s8", + "vreinterpret_u16_s8", + "vreinterpret_u32_s8", + 
"vreinterpret_u32_s8", + "vreinterpret_u64_s8", + "vreinterpret_u64_s8", + "vreinterpret_p8_s8", + "vreinterpret_p8_s8", + "vreinterpret_p16_s8", + "vreinterpret_p16_s8", + "vreinterpretq_f32_s8", + "vreinterpretq_f32_s8", + "vreinterpretq_s16_s8", + "vreinterpretq_s16_s8", + "vreinterpretq_s32_s8", + "vreinterpretq_s32_s8", + "vreinterpretq_s64_s8", + "vreinterpretq_s64_s8", + "vreinterpretq_u8_s8", + "vreinterpretq_u8_s8", + "vreinterpretq_u16_s8", + "vreinterpretq_u16_s8", + "vreinterpretq_u32_s8", + "vreinterpretq_u32_s8", + "vreinterpretq_u64_s8", + "vreinterpretq_u64_s8", + "vreinterpretq_p8_s8", + "vreinterpretq_p8_s8", + "vreinterpretq_p16_s8", + "vreinterpretq_p16_s8", + "vreinterpret_f32_s16", + "vreinterpret_f32_s16", + "vreinterpret_s8_s16", + "vreinterpret_s8_s16", + "vreinterpret_s32_s16", + "vreinterpret_s32_s16", + "vreinterpret_s64_s16", + "vreinterpret_s64_s16", + "vreinterpret_u8_s16", + "vreinterpret_u8_s16", + "vreinterpret_u16_s16", + "vreinterpret_u16_s16", + "vreinterpret_u32_s16", + "vreinterpret_u32_s16", + "vreinterpret_u64_s16", + "vreinterpret_u64_s16", + "vreinterpret_p8_s16", + "vreinterpret_p8_s16", + "vreinterpret_p16_s16", + "vreinterpret_p16_s16", + "vreinterpretq_f32_s16", + "vreinterpretq_f32_s16", + "vreinterpretq_s8_s16", + "vreinterpretq_s8_s16", + "vreinterpretq_s32_s16", + "vreinterpretq_s32_s16", + "vreinterpretq_s64_s16", + "vreinterpretq_s64_s16", + "vreinterpretq_u8_s16", + "vreinterpretq_u8_s16", + "vreinterpretq_u16_s16", + "vreinterpretq_u16_s16", + "vreinterpretq_u32_s16", + "vreinterpretq_u32_s16", + "vreinterpretq_u64_s16", + "vreinterpretq_u64_s16", + "vreinterpretq_p8_s16", + "vreinterpretq_p8_s16", + "vreinterpretq_p16_s16", + "vreinterpretq_p16_s16", + "vreinterpret_f32_s32", + "vreinterpret_f32_s32", + "vreinterpret_s8_s32", + "vreinterpret_s8_s32", + "vreinterpret_s16_s32", + "vreinterpret_s16_s32", + "vreinterpret_s64_s32", + "vreinterpret_s64_s32", + "vreinterpret_u8_s32", + "vreinterpret_u8_s32", + 
"vreinterpret_u16_s32", + "vreinterpret_u16_s32", + "vreinterpret_u32_s32", + "vreinterpret_u32_s32", + "vreinterpret_u64_s32", + "vreinterpret_u64_s32", + "vreinterpret_p8_s32", + "vreinterpret_p8_s32", + "vreinterpret_p16_s32", + "vreinterpret_p16_s32", + "vreinterpretq_f32_s32", + "vreinterpretq_f32_s32", + "vreinterpretq_s8_s32", + "vreinterpretq_s8_s32", + "vreinterpretq_s16_s32", + "vreinterpretq_s16_s32", + "vreinterpretq_s64_s32", + "vreinterpretq_s64_s32", + "vreinterpretq_u8_s32", + "vreinterpretq_u8_s32", + "vreinterpretq_u16_s32", + "vreinterpretq_u16_s32", + "vreinterpretq_u32_s32", + "vreinterpretq_u32_s32", + "vreinterpretq_u64_s32", + "vreinterpretq_u64_s32", + "vreinterpretq_p8_s32", + "vreinterpretq_p8_s32", + "vreinterpretq_p16_s32", + "vreinterpretq_p16_s32", + "vreinterpret_f32_s64", + "vreinterpret_f32_s64", + "vreinterpret_s8_s64", + "vreinterpret_s8_s64", + "vreinterpret_s16_s64", + "vreinterpret_s16_s64", + "vreinterpret_s32_s64", + "vreinterpret_s32_s64", + "vreinterpret_u8_s64", + "vreinterpret_u8_s64", + "vreinterpret_u16_s64", + "vreinterpret_u16_s64", + "vreinterpret_u32_s64", + "vreinterpret_u32_s64", + "vreinterpret_u64_s64", + "vreinterpret_p8_s64", + "vreinterpret_p8_s64", + "vreinterpret_p16_s64", + "vreinterpret_p16_s64", + "vreinterpretq_f32_s64", + "vreinterpretq_f32_s64", + "vreinterpretq_s8_s64", + "vreinterpretq_s8_s64", + "vreinterpretq_s16_s64", + "vreinterpretq_s16_s64", + "vreinterpretq_s32_s64", + "vreinterpretq_s32_s64", + "vreinterpretq_u8_s64", + "vreinterpretq_u8_s64", + "vreinterpretq_u16_s64", + "vreinterpretq_u16_s64", + "vreinterpretq_u32_s64", + "vreinterpretq_u32_s64", + "vreinterpretq_u64_s64", + "vreinterpretq_u64_s64", + "vreinterpretq_p8_s64", + "vreinterpretq_p8_s64", + "vreinterpretq_p16_s64", + "vreinterpretq_p16_s64", + "vreinterpret_f32_u8", + "vreinterpret_f32_u8", + "vreinterpret_s8_u8", + "vreinterpret_s8_u8", + "vreinterpret_s16_u8", + "vreinterpret_s16_u8", + "vreinterpret_s32_u8", + 
"vreinterpret_s32_u8", + "vreinterpret_s64_u8", + "vreinterpret_s64_u8", + "vreinterpret_u16_u8", + "vreinterpret_u16_u8", + "vreinterpret_u32_u8", + "vreinterpret_u32_u8", + "vreinterpret_u64_u8", + "vreinterpret_u64_u8", + "vreinterpret_p8_u8", + "vreinterpret_p8_u8", + "vreinterpret_p16_u8", + "vreinterpret_p16_u8", + "vreinterpretq_f32_u8", + "vreinterpretq_f32_u8", + "vreinterpretq_s8_u8", + "vreinterpretq_s8_u8", + "vreinterpretq_s16_u8", + "vreinterpretq_s16_u8", + "vreinterpretq_s32_u8", + "vreinterpretq_s32_u8", + "vreinterpretq_s64_u8", + "vreinterpretq_s64_u8", + "vreinterpretq_u16_u8", + "vreinterpretq_u16_u8", + "vreinterpretq_u32_u8", + "vreinterpretq_u32_u8", + "vreinterpretq_u64_u8", + "vreinterpretq_u64_u8", + "vreinterpretq_p8_u8", + "vreinterpretq_p8_u8", + "vreinterpretq_p16_u8", + "vreinterpretq_p16_u8", + "vreinterpret_f32_u16", + "vreinterpret_f32_u16", + "vreinterpret_s8_u16", + "vreinterpret_s8_u16", + "vreinterpret_s16_u16", + "vreinterpret_s16_u16", + "vreinterpret_s32_u16", + "vreinterpret_s32_u16", + "vreinterpret_s64_u16", + "vreinterpret_s64_u16", + "vreinterpret_u8_u16", + "vreinterpret_u8_u16", + "vreinterpret_u32_u16", + "vreinterpret_u32_u16", + "vreinterpret_u64_u16", + "vreinterpret_u64_u16", + "vreinterpret_p8_u16", + "vreinterpret_p8_u16", + "vreinterpret_p16_u16", + "vreinterpret_p16_u16", + "vreinterpretq_f32_u16", + "vreinterpretq_f32_u16", + "vreinterpretq_s8_u16", + "vreinterpretq_s8_u16", + "vreinterpretq_s16_u16", + "vreinterpretq_s16_u16", + "vreinterpretq_s32_u16", + "vreinterpretq_s32_u16", + "vreinterpretq_s64_u16", + "vreinterpretq_s64_u16", + "vreinterpretq_u8_u16", + "vreinterpretq_u8_u16", + "vreinterpretq_u32_u16", + "vreinterpretq_u32_u16", + "vreinterpretq_u64_u16", + "vreinterpretq_u64_u16", + "vreinterpretq_p8_u16", + "vreinterpretq_p8_u16", + "vreinterpretq_p16_u16", + "vreinterpretq_p16_u16", + "vreinterpret_f32_u32", + "vreinterpret_f32_u32", + "vreinterpret_s8_u32", + "vreinterpret_s8_u32", + 
"vreinterpret_s16_u32", + "vreinterpret_s16_u32", + "vreinterpret_s32_u32", + "vreinterpret_s32_u32", + "vreinterpret_s64_u32", + "vreinterpret_s64_u32", + "vreinterpret_u8_u32", + "vreinterpret_u8_u32", + "vreinterpret_u16_u32", + "vreinterpret_u16_u32", + "vreinterpret_u64_u32", + "vreinterpret_u64_u32", + "vreinterpret_p8_u32", + "vreinterpret_p8_u32", + "vreinterpret_p16_u32", + "vreinterpret_p16_u32", + "vreinterpretq_f32_u32", + "vreinterpretq_f32_u32", + "vreinterpretq_s8_u32", + "vreinterpretq_s8_u32", + "vreinterpretq_s16_u32", + "vreinterpretq_s16_u32", + "vreinterpretq_s32_u32", + "vreinterpretq_s32_u32", + "vreinterpretq_s64_u32", + "vreinterpretq_s64_u32", + "vreinterpretq_u8_u32", + "vreinterpretq_u8_u32", + "vreinterpretq_u16_u32", + "vreinterpretq_u16_u32", + "vreinterpretq_u64_u32", + "vreinterpretq_u64_u32", + "vreinterpretq_p8_u32", + "vreinterpretq_p8_u32", + "vreinterpretq_p16_u32", + "vreinterpretq_p16_u32", + "vreinterpret_f32_u64", + "vreinterpret_f32_u64", + "vreinterpret_s8_u64", + "vreinterpret_s8_u64", + "vreinterpret_s16_u64", + "vreinterpret_s16_u64", + "vreinterpret_s32_u64", + "vreinterpret_s32_u64", + "vreinterpret_s64_u64", + "vreinterpret_u8_u64", + "vreinterpret_u8_u64", + "vreinterpret_u16_u64", + "vreinterpret_u16_u64", + "vreinterpret_u32_u64", + "vreinterpret_u32_u64", + "vreinterpret_p8_u64", + "vreinterpret_p8_u64", + "vreinterpret_p16_u64", + "vreinterpret_p16_u64", + "vreinterpretq_f32_u64", + "vreinterpretq_f32_u64", + "vreinterpretq_s8_u64", + "vreinterpretq_s8_u64", + "vreinterpretq_s16_u64", + "vreinterpretq_s16_u64", + "vreinterpretq_s32_u64", + "vreinterpretq_s32_u64", + "vreinterpretq_s64_u64", + "vreinterpretq_s64_u64", + "vreinterpretq_u8_u64", + "vreinterpretq_u8_u64", + "vreinterpretq_u16_u64", + "vreinterpretq_u16_u64", + "vreinterpretq_u32_u64", + "vreinterpretq_u32_u64", + "vreinterpretq_p8_u64", + "vreinterpretq_p8_u64", + "vreinterpretq_p16_u64", + "vreinterpretq_p16_u64", + "vreinterpret_f32_p8", + 
"vreinterpret_f32_p8", + "vreinterpret_s8_p8", + "vreinterpret_s8_p8", + "vreinterpret_s16_p8", + "vreinterpret_s16_p8", + "vreinterpret_s32_p8", + "vreinterpret_s32_p8", + "vreinterpret_s64_p8", + "vreinterpret_s64_p8", + "vreinterpret_u8_p8", + "vreinterpret_u8_p8", + "vreinterpret_u16_p8", + "vreinterpret_u16_p8", + "vreinterpret_u32_p8", + "vreinterpret_u32_p8", + "vreinterpret_u64_p8", + "vreinterpret_u64_p8", + "vreinterpret_p16_p8", + "vreinterpret_p16_p8", + "vreinterpretq_f32_p8", + "vreinterpretq_f32_p8", + "vreinterpretq_s8_p8", + "vreinterpretq_s8_p8", + "vreinterpretq_s16_p8", + "vreinterpretq_s16_p8", + "vreinterpretq_s32_p8", + "vreinterpretq_s32_p8", + "vreinterpretq_s64_p8", + "vreinterpretq_s64_p8", + "vreinterpretq_u8_p8", + "vreinterpretq_u8_p8", + "vreinterpretq_u16_p8", + "vreinterpretq_u16_p8", + "vreinterpretq_u32_p8", + "vreinterpretq_u32_p8", + "vreinterpretq_u64_p8", + "vreinterpretq_u64_p8", + "vreinterpretq_p16_p8", + "vreinterpretq_p16_p8", + "vreinterpret_f32_p16", + "vreinterpret_f32_p16", + "vreinterpret_s8_p16", + "vreinterpret_s8_p16", + "vreinterpret_s16_p16", + "vreinterpret_s16_p16", + "vreinterpret_s32_p16", + "vreinterpret_s32_p16", + "vreinterpret_s64_p16", + "vreinterpret_s64_p16", + "vreinterpret_u8_p16", + "vreinterpret_u8_p16", + "vreinterpret_u16_p16", + "vreinterpret_u16_p16", + "vreinterpret_u32_p16", + "vreinterpret_u32_p16", + "vreinterpret_u64_p16", + "vreinterpret_u64_p16", + "vreinterpret_p8_p16", + "vreinterpret_p8_p16", + "vreinterpretq_f32_p16", + "vreinterpretq_f32_p16", + "vreinterpretq_s8_p16", + "vreinterpretq_s8_p16", + "vreinterpretq_s16_p16", + "vreinterpretq_s16_p16", + "vreinterpretq_s32_p16", + "vreinterpretq_s32_p16", + "vreinterpretq_s64_p16", + "vreinterpretq_s64_p16", + "vreinterpretq_u8_p16", + "vreinterpretq_u8_p16", + "vreinterpretq_u16_p16", + "vreinterpretq_u16_p16", + "vreinterpretq_u32_p16", + "vreinterpretq_u32_p16", + "vreinterpretq_u64_p16", + "vreinterpretq_u64_p16", + 
"vreinterpretq_p8_p16", + "vreinterpretq_p8_p16", + "vreinterpretq_s8_p128", + "vreinterpretq_s8_p128", + "vreinterpretq_s16_p128", + "vreinterpretq_s16_p128", + "vreinterpretq_s32_p128", + "vreinterpretq_s32_p128", + "vreinterpretq_s64_p128", + "vreinterpretq_s64_p128", + "vreinterpretq_u8_p128", + "vreinterpretq_u8_p128", + "vreinterpretq_u16_p128", + "vreinterpretq_u16_p128", + "vreinterpretq_u32_p128", + "vreinterpretq_u32_p128", + "vreinterpretq_u64_p128", + "vreinterpretq_u64_p128", + "vreinterpretq_p8_p128", + "vreinterpretq_p8_p128", + "vreinterpretq_p16_p128", + "vreinterpretq_p16_p128", + "vreinterpretq_p64_p128", + "vreinterpretq_p64_p128", + "vreinterpret_p64_s8", + "vreinterpret_p64_s8", + "vreinterpretq_p128_s8", + "vreinterpretq_p128_s8", + "vreinterpretq_p64_s8", + "vreinterpretq_p64_s8", + "vreinterpret_p64_s16", + "vreinterpret_p64_s16", + "vreinterpretq_p128_s16", + "vreinterpretq_p128_s16", + "vreinterpretq_p64_s16", + "vreinterpretq_p64_s16", + "vreinterpret_p64_s32", + "vreinterpret_p64_s32", + "vreinterpretq_p128_s32", + "vreinterpretq_p128_s32", + "vreinterpretq_p64_s32", + "vreinterpretq_p64_s32", + "vreinterpretq_p128_s64", + "vreinterpretq_p128_s64", + "vreinterpret_p64_u8", + "vreinterpret_p64_u8", + "vreinterpretq_p128_u8", + "vreinterpretq_p128_u8", + "vreinterpretq_p64_u8", + "vreinterpretq_p64_u8", + "vreinterpret_p64_u16", + "vreinterpret_p64_u16", + "vreinterpretq_p128_u16", + "vreinterpretq_p128_u16", + "vreinterpretq_p64_u16", + "vreinterpretq_p64_u16", + "vreinterpret_p64_u32", + "vreinterpret_p64_u32", + "vreinterpretq_p128_u32", + "vreinterpretq_p128_u32", + "vreinterpretq_p64_u32", + "vreinterpretq_p64_u32", + "vreinterpretq_p128_u64", + "vreinterpretq_p128_u64", + "vreinterpret_p64_p8", + "vreinterpret_p64_p8", + "vreinterpretq_p128_p8", + "vreinterpretq_p128_p8", + "vreinterpretq_p64_p8", + "vreinterpretq_p64_p8", + "vreinterpret_p64_p16", + "vreinterpret_p64_p16", + "vreinterpretq_p128_p16", + "vreinterpretq_p128_p16", + 
"vreinterpretq_p64_p16", + "vreinterpretq_p64_p16", + "vreinterpret_s8_p64", + "vreinterpret_s8_p64", + "vreinterpret_s16_p64", + "vreinterpret_s16_p64", + "vreinterpret_s32_p64", + "vreinterpret_s32_p64", + "vreinterpret_u8_p64", + "vreinterpret_u8_p64", + "vreinterpret_u16_p64", + "vreinterpret_u16_p64", + "vreinterpret_u32_p64", + "vreinterpret_u32_p64", + "vreinterpret_p8_p64", + "vreinterpret_p8_p64", + "vreinterpret_p16_p64", + "vreinterpret_p16_p64", + "vreinterpretq_p128_p64", + "vreinterpretq_p128_p64", + "vreinterpretq_s8_p64", + "vreinterpretq_s8_p64", + "vreinterpretq_s16_p64", + "vreinterpretq_s16_p64", + "vreinterpretq_s32_p64", + "vreinterpretq_s32_p64", + "vreinterpretq_u8_p64", + "vreinterpretq_u8_p64", + "vreinterpretq_u16_p64", + "vreinterpretq_u16_p64", + "vreinterpretq_u32_p64", + "vreinterpretq_u32_p64", + "vreinterpretq_p8_p64", + "vreinterpretq_p8_p64", + "vreinterpretq_p16_p64", + "vreinterpretq_p16_p64", + "vrev64_f16", + "vrev64q_f16", + "vrndn_f16", + "vrndnq_f16", + "vrndn_f32", + "vrndnq_f32", + "vrshl_s8", + "vrshlq_s8", + "vrshl_s16", + "vrshlq_s16", + "vrshl_s32", + "vrshlq_s32", + "vrshl_s64", + "vrshlq_s64", + "vrshl_u8", + "vrshlq_u8", + "vrshl_u16", + "vrshlq_u16", + "vrshl_u32", + "vrshlq_u32", + "vrshl_u64", + "vrshlq_u64", + "vrshr_n_s8", + "vrshrq_n_s8", + "vrshr_n_s16", + "vrshrq_n_s16", + "vrshr_n_s32", + "vrshrq_n_s32", + "vrshr_n_s64", + "vrshrq_n_s64", + "vrshr_n_u8", + "vrshrq_n_u8", + "vrshr_n_u16", + "vrshrq_n_u16", + "vrshr_n_u32", + "vrshrq_n_u32", + "vrshr_n_u64", + "vrshrq_n_u64", + "vrshrn_n_s16", + "vrshrn_n_s32", + "vrshrn_n_s64", + "vrshrn_n_s16", + "vrshrn_n_s32", + "vrshrn_n_s64", + "vrshrn_n_u16", + "vrshrn_n_u32", + "vrshrn_n_u64", + "vrsqrte_f16", + "vrsqrteq_f16", + "vrsqrteq_f32", + "vrsqrte_u32", + "vrsqrteq_u32", + "vrsqrts_f16", + "vrsqrtsq_f16", + "vrsqrts_f32", + "vrsqrtsq_f32", + "vrsra_n_s8", + "vrsraq_n_s8", + "vrsra_n_s16", + "vrsraq_n_s16", + "vrsra_n_s32", + "vrsraq_n_s32", + "vrsra_n_s64", 
+ "vrsraq_n_s64", + "vrsra_n_u8", + "vrsraq_n_u8", + "vrsra_n_u16", + "vrsraq_n_u16", + "vrsra_n_u32", + "vrsraq_n_u32", + "vrsra_n_u64", + "vrsraq_n_u64", + "vrsubhn_s16", + "vrsubhn_s32", + "vrsubhn_s64", + "vrsubhn_u16", + "vrsubhn_u16", + "vrsubhn_u32", + "vrsubhn_u32", + "vrsubhn_u64", + "vrsubhn_u64", + "vset_lane_f16", + "vsetq_lane_f16", + "vset_lane_f32", + "vsetq_lane_f32", + "vset_lane_s8", + "vsetq_lane_s8", + "vset_lane_s16", + "vsetq_lane_s16", + "vset_lane_s32", + "vsetq_lane_s32", + "vsetq_lane_s64", + "vset_lane_u8", + "vsetq_lane_u8", + "vset_lane_u16", + "vsetq_lane_u16", + "vset_lane_u32", + "vsetq_lane_u32", + "vsetq_lane_u64", + "vset_lane_p8", + "vsetq_lane_p8", + "vset_lane_p16", + "vsetq_lane_p16", + "vset_lane_p64", + "vset_lane_s64", + "vset_lane_u64", + "vsetq_lane_p64", + "vsha1cq_u32", + "vsha1h_u32", + "vsha1mq_u32", + "vsha1pq_u32", + "vsha1su0q_u32", + "vsha1su1q_u32", + "vsha256h2q_u32", + "vsha256hq_u32", + "vsha256su0q_u32", + "vsha256su1q_u32", + "vshl_n_s8", + "vshlq_n_s8", + "vshl_n_s16", + "vshlq_n_s16", + "vshl_n_s32", + "vshlq_n_s32", + "vshl_n_s64", + "vshlq_n_s64", + "vshl_n_u8", + "vshlq_n_u8", + "vshl_n_u16", + "vshlq_n_u16", + "vshl_n_u32", + "vshlq_n_u32", + "vshl_n_u64", + "vshlq_n_u64", + "vshl_s8", + "vshlq_s8", + "vshl_s16", + "vshlq_s16", + "vshl_s32", + "vshlq_s32", + "vshl_s64", + "vshlq_s64", + "vshl_u8", + "vshlq_u8", + "vshl_u16", + "vshlq_u16", + "vshl_u32", + "vshlq_u32", + "vshl_u64", + "vshlq_u64", + "vshll_n_s16", + "vshll_n_s32", + "vshll_n_s8", + "vshll_n_u16", + "vshll_n_u32", + "vshll_n_u8", + "vshr_n_s8", + "vshrq_n_s8", + "vshr_n_s16", + "vshrq_n_s16", + "vshr_n_s32", + "vshrq_n_s32", + "vshr_n_s64", + "vshrq_n_s64", + "vshr_n_u8", + "vshrq_n_u8", + "vshr_n_u16", + "vshrq_n_u16", + "vshr_n_u32", + "vshrq_n_u32", + "vshr_n_u64", + "vshrq_n_u64", + "vshrn_n_s16", + "vshrn_n_s32", + "vshrn_n_s64", + "vshrn_n_u16", + "vshrn_n_u32", + "vshrn_n_u64", + "vsra_n_s8", + "vsraq_n_s8", + "vsra_n_s16", + 
"vsraq_n_s16", + "vsra_n_s32", + "vsraq_n_s32", + "vsra_n_s64", + "vsraq_n_s64", + "vsra_n_u8", + "vsraq_n_u8", + "vsra_n_u16", + "vsraq_n_u16", + "vsra_n_u32", + "vsraq_n_u32", + "vsra_n_u64", + "vsraq_n_u64", + "vst1_f16", + "vst1q_f16", + "vst1_f16_x2", + "vst1q_f16_x2", + "vst1_f16_x2", + "vst1q_f16_x2", + "vst1_f16_x3", + "vst1q_f16_x3", + "vst1_f16_x3", + "vst1q_f16_x3", + "vst1_f16_x4", + "vst1q_f16_x4", + "vst1_f16_x4", + "vst1q_f16_x4", + "vst1_f32_x2", + "vst1q_f32_x2", + "vst1_f32_x2", + "vst1q_f32_x2", + "vst1_f32_x3", + "vst1q_f32_x3", + "vst1_f32_x4", + "vst1q_f32_x4", + "vst1_f32_x4", + "vst1q_f32_x4", + "vst1_lane_f16", + "vst1q_lane_f16", + "vst1_lane_f32", + "vst1q_lane_f32", + "vst1_lane_s8", + "vst1q_lane_s8", + "vst1_lane_s16", + "vst1q_lane_s16", + "vst1_lane_s32", + "vst1q_lane_s32", + "vst1q_lane_s64", + "vst1_lane_u8", + "vst1q_lane_u8", + "vst1_lane_u16", + "vst1q_lane_u16", + "vst1_lane_u32", + "vst1q_lane_u32", + "vst1q_lane_u64", + "vst1_lane_p8", + "vst1q_lane_p8", + "vst1_lane_p16", + "vst1q_lane_p16", + "vst1_lane_p64", + "vst1_lane_s64", + "vst1_lane_u64", + "vst1_p64_x2", + "vst1_p64_x3", + "vst1_p64_x4", + "vst1q_p64_x2", + "vst1q_p64_x3", + "vst1q_p64_x4", + "vst1_s8_x2", + "vst1q_s8_x2", + "vst1_s16_x2", + "vst1q_s16_x2", + "vst1_s32_x2", + "vst1q_s32_x2", + "vst1_s64_x2", + "vst1q_s64_x2", + "vst1_s8_x2", + "vst1q_s8_x2", + "vst1_s16_x2", + "vst1q_s16_x2", + "vst1_s32_x2", + "vst1q_s32_x2", + "vst1_s64_x2", + "vst1q_s64_x2", + "vst1_s8_x3", + "vst1q_s8_x3", + "vst1_s16_x3", + "vst1q_s16_x3", + "vst1_s32_x3", + "vst1q_s32_x3", + "vst1_s64_x3", + "vst1q_s64_x3", + "vst1_s8_x3", + "vst1q_s8_x3", + "vst1_s16_x3", + "vst1q_s16_x3", + "vst1_s32_x3", + "vst1q_s32_x3", + "vst1_s64_x3", + "vst1q_s64_x3", + "vst1_s8_x4", + "vst1q_s8_x4", + "vst1_s16_x4", + "vst1q_s16_x4", + "vst1_s32_x4", + "vst1q_s32_x4", + "vst1_s64_x4", + "vst1q_s64_x4", + "vst1_s8_x4", + "vst1q_s8_x4", + "vst1_s16_x4", + "vst1q_s16_x4", + "vst1_s32_x4", + 
"vst1q_s32_x4", + "vst1_s64_x4", + "vst1q_s64_x4", + "vst1_u8_x2", + "vst1_u8_x3", + "vst1_u8_x4", + "vst1q_u8_x2", + "vst1q_u8_x3", + "vst1q_u8_x4", + "vst1_u16_x2", + "vst1_u16_x3", + "vst1_u16_x4", + "vst1q_u16_x2", + "vst1q_u16_x3", + "vst1q_u16_x4", + "vst1_u32_x2", + "vst1_u32_x3", + "vst1_u32_x4", + "vst1q_u32_x2", + "vst1q_u32_x3", + "vst1q_u32_x4", + "vst1_u64_x2", + "vst1_u64_x3", + "vst1_u64_x4", + "vst1q_u64_x2", + "vst1q_u64_x3", + "vst1q_u64_x4", + "vst1_p8_x2", + "vst1_p8_x3", + "vst1_p8_x4", + "vst1q_p8_x2", + "vst1q_p8_x3", + "vst1q_p8_x4", + "vst1_p16_x2", + "vst1_p16_x3", + "vst1_p16_x4", + "vst1q_p16_x2", + "vst1q_p16_x3", + "vst1q_p16_x4", + "vst1q_lane_p64", + "vst2_f16", + "vst2q_f16", + "vst2_f16", + "vst2q_f16", + "vst2_f32", + "vst2q_f32", + "vst2_s8", + "vst2q_s8", + "vst2_s16", + "vst2q_s16", + "vst2_s32", + "vst2q_s32", + "vst2_f32", + "vst2q_f32", + "vst2_s8", + "vst2q_s8", + "vst2_s16", + "vst2q_s16", + "vst2_s32", + "vst2q_s32", + "vst2_lane_f16", + "vst2q_lane_f16", + "vst2_lane_f16", + "vst2q_lane_f16", + "vst2_lane_f32", + "vst2q_lane_f32", + "vst2_lane_s8", + "vst2_lane_s16", + "vst2q_lane_s16", + "vst2_lane_s32", + "vst2q_lane_s32", + "vst2_lane_f32", + "vst2q_lane_f32", + "vst2_lane_s8", + "vst2_lane_s16", + "vst2q_lane_s16", + "vst2_lane_s32", + "vst2q_lane_s32", + "vst2_lane_u8", + "vst2_lane_u16", + "vst2q_lane_u16", + "vst2_lane_u32", + "vst2q_lane_u32", + "vst2_lane_p8", + "vst2_lane_p16", + "vst2q_lane_p16", + "vst2_p64", + "vst2_s64", + "vst2_s64", + "vst2_u64", + "vst2_u8", + "vst2q_u8", + "vst2_u16", + "vst2q_u16", + "vst2_u32", + "vst2q_u32", + "vst2_p8", + "vst2q_p8", + "vst2_p16", + "vst2q_p16", + "vst3_f16", + "vst3q_f16", + "vst3_f16", + "vst3q_f16", + "vst3_f32", + "vst3q_f32", + "vst3_s8", + "vst3q_s8", + "vst3_s16", + "vst3q_s16", + "vst3_s32", + "vst3q_s32", + "vst3_f32", + "vst3q_f32", + "vst3_s8", + "vst3q_s8", + "vst3_s16", + "vst3q_s16", + "vst3_s32", + "vst3q_s32", + "vst3_lane_f16", + "vst3q_lane_f16", + 
"vst3_lane_f16", + "vst3q_lane_f16", + "vst3_lane_f32", + "vst3q_lane_f32", + "vst3_lane_s8", + "vst3_lane_s16", + "vst3q_lane_s16", + "vst3_lane_s32", + "vst3q_lane_s32", + "vst3_lane_f32", + "vst3q_lane_f32", + "vst3_lane_s8", + "vst3_lane_s16", + "vst3q_lane_s16", + "vst3_lane_s32", + "vst3q_lane_s32", + "vst3_lane_u8", + "vst3_lane_u16", + "vst3q_lane_u16", + "vst3_lane_u32", + "vst3q_lane_u32", + "vst3_lane_p8", + "vst3_lane_p16", + "vst3q_lane_p16", + "vst3_p64", + "vst3_s64", + "vst3_s64", + "vst3_u64", + "vst3_u8", + "vst3q_u8", + "vst3_u16", + "vst3q_u16", + "vst3_u32", + "vst3q_u32", + "vst3_p8", + "vst3q_p8", + "vst3_p16", + "vst3q_p16", + "vst4_f16", + "vst4q_f16", + "vst4_f16", + "vst4q_f16", + "vst4_f32", + "vst4q_f32", + "vst4_s8", + "vst4q_s8", + "vst4_s16", + "vst4q_s16", + "vst4_s32", + "vst4q_s32", + "vst4_f32", + "vst4q_f32", + "vst4_s8", + "vst4q_s8", + "vst4_s16", + "vst4q_s16", + "vst4_s32", + "vst4q_s32", + "vst4_lane_f16", + "vst4q_lane_f16", + "vst4_lane_f16", + "vst4q_lane_f16", + "vst4_lane_f32", + "vst4q_lane_f32", + "vst4_lane_s8", + "vst4_lane_s16", + "vst4q_lane_s16", + "vst4_lane_s32", + "vst4q_lane_s32", + "vst4_lane_f32", + "vst4q_lane_f32", + "vst4_lane_s8", + "vst4_lane_s16", + "vst4q_lane_s16", + "vst4_lane_s32", + "vst4q_lane_s32", + "vst4_lane_u8", + "vst4_lane_u16", + "vst4q_lane_u16", + "vst4_lane_u32", + "vst4q_lane_u32", + "vst4_lane_p8", + "vst4_lane_p16", + "vst4q_lane_p16", + "vst4_p64", + "vst4_s64", + "vst4_s64", + "vst4_u64", + "vst4_u8", + "vst4q_u8", + "vst4_u16", + "vst4q_u16", + "vst4_u32", + "vst4q_u32", + "vst4_p8", + "vst4q_p8", + "vst4_p16", + "vst4q_p16", + "vsub_f16", + "vsubq_f16", + "vsub_s64", + "vsubq_s64", + "vsub_u64", + "vsubq_u64", + "vsubhn_high_s16", + "vsubhn_high_s32", + "vsubhn_high_s64", + "vsubhn_high_u16", + "vsubhn_high_u32", + "vsubhn_high_u64", + "vsubhn_s16", + "vsubhn_s32", + "vsubhn_s64", + "vsubhn_u16", + "vsubhn_u32", + "vsubhn_u64", + "vsubl_s8", + "vsubl_s16", + "vsubl_s32", + 
"vsubl_u8", + "vsubl_u16", + "vsubl_u32", + "vsubw_s8", + "vsubw_s16", + "vsubw_s32", + "vsubw_u8", + "vsubw_u16", + "vsubw_u32", + "vsudot_lane_s32", + "vsudot_lane_s32", + "vsudotq_lane_s32", + "vsudotq_lane_s32", + "vsudot_laneq_s32", + "vsudotq_laneq_s32", + "vtrn_f16", + "vtrnq_f16", + "vtrn_f32", + "vtrn_s32", + "vtrn_u32", + "vtrnq_f32", + "vtrn_s8", + "vtrnq_s8", + "vtrn_s16", + "vtrnq_s16", + "vtrnq_s32", + "vtrn_u8", + "vtrnq_u8", + "vtrn_u16", + "vtrnq_u16", + "vtrnq_u32", + "vtrn_p8", + "vtrnq_p8", + "vtrn_p16", + "vtrnq_p16", + "vtst_s8", + "vtstq_s8", + "vtst_s16", + "vtstq_s16", + "vtst_s32", + "vtstq_s32", + "vtst_p8", + "vtstq_p8", + "vtst_p16", + "vtstq_p16", + "vtst_u8", + "vtstq_u8", + "vtst_u16", + "vtstq_u16", + "vtst_u32", + "vtstq_u32", + "vusdot_lane_s32", + "vusdot_lane_s32", + "vusdotq_lane_s32", + "vusdotq_lane_s32", + "vusdot_laneq_s32", + "vusdot_laneq_s32", + "vusdotq_laneq_s32", + "vusdotq_laneq_s32", + "vusdot_s32", + "vusdotq_s32", + "vusmmlaq_s32", + "vuzp_f16", + "vuzpq_f16", + "vuzp_f32", + "vuzp_s32", + "vuzp_u32", + "vuzpq_f32", + "vuzp_s8", + "vuzpq_s8", + "vuzp_s16", + "vuzpq_s16", + "vuzpq_s32", + "vuzp_u8", + "vuzpq_u8", + "vuzp_u16", + "vuzpq_u16", + "vuzpq_u32", + "vuzp_p8", + "vuzpq_p8", + "vuzp_p16", + "vuzpq_p16", + "vzip_f16", + "vzipq_f16", + "vzip_f32", + "vzip_s32", + "vzip_u32", + "vzip_s8", + "vzip_s16", + "vzip_u8", + "vzip_u16", + "vzip_p8", + "vzip_p16", + "vzipq_f32", + "vzipq_s8", + "vzipq_s16", + "vzipq_s32", + "vzipq_u8", + "vzipq_u16", + "vzipq_u32", + "vzipq_p8", + "vzipq_p16", + "__rndr", + "__rndrrs", +]; From 2637e0806bc952dcdccaa1c8c6837737612fa1f9 Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 15 Jan 2026 15:53:53 +0000 Subject: [PATCH 30/64] gen-arm: add `generate_load_store_tests` Instead of generating load/store tests based on the input filename - which no longer works given the expected input file structure of `stdarch-gen-arm` - add a simple global context option that SVE specs can set. 
--- library/stdarch/crates/stdarch-gen-arm/src/context.rs | 4 ++++ library/stdarch/crates/stdarch-gen-arm/src/main.rs | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/library/stdarch/crates/stdarch-gen-arm/src/context.rs b/library/stdarch/crates/stdarch-gen-arm/src/context.rs index 9b8eb8e8b9bfe..4d02a82b89661 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/context.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/context.rs @@ -43,6 +43,10 @@ pub struct GlobalContext { /// Should all LLVM wrappers convert their arguments to a signed type #[serde(default)] pub auto_llvm_sign_conversion: bool, + + /// Should SVE load/store tests be generated? + #[serde(default)] + pub generate_load_store_tests: bool, } /// Context of an intrinsic group diff --git a/library/stdarch/crates/stdarch-gen-arm/src/main.rs b/library/stdarch/crates/stdarch-gen-arm/src/main.rs index e14e2782485b9..b7e2aa416fb59 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/main.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/main.rs @@ -54,7 +54,7 @@ fn main() -> Result<(), String> { vv.into_iter().flatten().collect_vec() })?; - if filepath.ends_with("sve.spec.yml") || filepath.ends_with("sve2.spec.yml") { + if input.ctx.generate_load_store_tests { let loads = intrinsics.iter() .filter_map(|i| { if matches!(i.test, Test::Load(..)) { From 8077797d754474b09f69ac5b0b4b9616e4a41230 Mon Sep 17 00:00:00 2001 From: David Wood Date: Sat, 28 Feb 2026 18:00:16 +0000 Subject: [PATCH 31/64] gen-arm: remove `SvUndef` The `SvUndef` expression is no longer necessary as a `core::intrinsics::scalable::sve_undef` intrinsic has been introduced to produce an undefined SVE vector, used by `svundef*` intrinsics. Other intrinsics that used `SvUndef` now use the `svundef*` intrinsics. 
--- library/stdarch/crates/stdarch-gen-arm/README.md | 3 --- .../stdarch/crates/stdarch-gen-arm/src/expression.rs | 12 ++---------- 2 files changed, 2 insertions(+), 13 deletions(-) diff --git a/library/stdarch/crates/stdarch-gen-arm/README.md b/library/stdarch/crates/stdarch-gen-arm/README.md index 64f1183f1d6d4..970721681c04a 100644 --- a/library/stdarch/crates/stdarch-gen-arm/README.md +++ b/library/stdarch/crates/stdarch-gen-arm/README.md @@ -205,9 +205,6 @@ MatchKind: - `Array` - An array of expressions - Usage: `Array: [, ...]` -- `SvUndef` - - Returns the LLVM `undef` symbol - - Usage: `SvUndef` - `Multiply` - Simply `*` - Usage: `Multiply: [, ]` diff --git a/library/stdarch/crates/stdarch-gen-arm/src/expression.rs b/library/stdarch/crates/stdarch-gen-arm/src/expression.rs index bf48f0dab7498..0b6ffef9d8d3c 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/expression.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/expression.rs @@ -143,8 +143,6 @@ pub enum Expression { LLVMLink(LLVMLink), /// Casts the given expression to the specified (unchecked) type CastAs(Box, String), - /// Returns the LLVM `undef` symbol - SvUndef, /// Multiplication Multiply(Box, Box), /// Xor @@ -295,7 +293,7 @@ impl Expression { /// - An unnecessary `unsafe` is a warning, made into an error by the CI's `-D warnings`. /// /// This **panics** if it encounters an expression that shouldn't appear in a safe function at - /// all (such as `SvUndef`). + /// all. pub fn requires_unsafe_wrapper(&self, ctx_fn: &str) -> bool { match self { // The call will need to be unsafe, but the declaration does not. @@ -347,9 +345,6 @@ impl Expression { }, // We only use macros to check const generics (using static assertions). Self::MacroCall(_name, _args) => false, - // Materialising uninitialised values is always unsafe, and we avoid it in safe - // functions. - Self::SvUndef => panic!("Refusing to wrap unsafe SvUndef in safe function '{ctx_fn}'."), // Variants that aren't tokenised. 
We shouldn't encounter these here. Self::MatchKind(..) => { unimplemented!("The unsafety of {self:?} cannot be determined in '{ctx_fn}'.") @@ -390,9 +385,7 @@ impl FromStr for Expression { static MACRO_RE: LazyLock = LazyLock::new(|| Regex::new(r"^(?P[\w\d_]+)!\((?P.*?)\);?$").unwrap()); - if s == "SvUndef" { - Ok(Expression::SvUndef) - } else if MACRO_RE.is_match(s) { + if MACRO_RE.is_match(s) { let c = MACRO_RE.captures(s).unwrap(); let ex = c["ex"].to_string(); let _: TokenStream = ex @@ -533,7 +526,6 @@ impl ToTokens for Expression { let ty: TokenStream = ty.parse().expect("invalid syntax"); tokens.append_all(quote! { #ex as #ty }) } - Self::SvUndef => tokens.append_all(quote! { simd_reinterpret(()) }), Self::Multiply(lhs, rhs) => tokens.append_all(quote! { #lhs * #rhs }), Self::Xor(lhs, rhs) => tokens.append_all(quote! { #lhs ^ #rhs }), Self::Type(ty) => ty.to_tokens(tokens), From 55b65ff1ee6ed650d2f50fa30968f91d7cf418a6 Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 15 Jan 2026 15:53:53 +0000 Subject: [PATCH 32/64] gen-arm: s/simd_reinterpret/transmute_unchecked `simd_reinterpret` was expected to be used when it was added as `transmute_unchecked` requires `Sized`, but scalable vectors are now `Sized` so `transmute_unchecked` can be used and `simd_reinterpret` was not added in rust-lang/rust#143924. 
--- library/stdarch/crates/stdarch-gen-arm/src/typekinds.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/stdarch/crates/stdarch-gen-arm/src/typekinds.rs b/library/stdarch/crates/stdarch-gen-arm/src/typekinds.rs index bd47ff2bd1557..c3aa22294d9fc 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/typekinds.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/typekinds.rs @@ -289,9 +289,9 @@ impl TypeKind { ( BaseType::Sized(Float | Int | UInt, _), BaseType::Sized(Float | Int | UInt, _), - ) => Some(FnCall::new_expression( + ) => Some(FnCall::new_unsafe_expression( // Conversions between float and (u)int, or where the lane size changes. - "simd_reinterpret".parse().unwrap(), + "transmute_unchecked".parse().unwrap(), vec![expr.into()], )), _ => None, From c8840791439caf5709b6a7d77cb309a37925b0d8 Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 15 Jan 2026 15:53:53 +0000 Subject: [PATCH 33/64] gen-arm: `auto-llvm-sign-conversion` not for `into` Matching the current behaviour for arguments, `auto_llvm_sign_conversion` should only be required for `as_unsigned` conversions, not `into` conversions. 
--- .../crates/stdarch-gen-arm/src/intrinsic.rs | 33 +++++++++---------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs index ce427d54b3552..e20ab6779cfcd 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs @@ -647,27 +647,26 @@ impl LLVMLink { }) .try_collect()?; - let return_type_conversion = if !ctx.global.auto_llvm_sign_conversion { - None - } else { - self.signature - .as_ref() - .and_then(|sig| sig.return_type.as_ref()) - .and_then(|ty| { - if let Some(Sized(Bool, bitsize)) = ty.base_type() { - (*bitsize != 8).then_some(Bool) - } else if let Some(Sized(UInt, _) | Unsized(UInt)) = ty.base_type() { - Some(UInt) - } else { - None - } - }) - }; + let return_type_conversion = self + .signature + .as_ref() + .and_then(|sig| sig.return_type.as_ref()) + .and_then(|ty| { + if let Some(Sized(Bool, bitsize)) = ty.base_type() { + (*bitsize != 8).then_some(Bool) + } else if let Some(Sized(UInt, _) | Unsized(UInt)) = ty.base_type() { + Some(UInt) + } else { + None + } + }); let fn_call = Expression::FnCall(fn_call); match return_type_conversion { Some(Bool) => Ok(convert("into", fn_call)), - Some(UInt) => Ok(convert("as_unsigned", fn_call)), + Some(UInt) if ctx.global.auto_llvm_sign_conversion => { + Ok(convert("as_unsigned", fn_call)) + } _ => Ok(fn_call), } } From 21201482754b7ede778d407435e4656b9d23674b Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 15 Jan 2026 15:53:53 +0000 Subject: [PATCH 34/64] core_arch: add `static_assert_range` This is a convenience macro used by the generated SVE intrinsics. 
Co-authored-by: Jamie Cunliffe Co-authored-by: Luca Vizzarro Co-authored-by: Adam Gemmell Co-authored-by: Jacob Bramley --- library/stdarch/crates/core_arch/src/macros.rs | 16 ++++++++++++++++ .../crates/stdarch-gen-arm/src/context.rs | 4 ++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/macros.rs b/library/stdarch/crates/core_arch/src/macros.rs index 00e92428b3e7e..83039bc65acc9 100644 --- a/library/stdarch/crates/core_arch/src/macros.rs +++ b/library/stdarch/crates/core_arch/src/macros.rs @@ -14,6 +14,22 @@ macro_rules! static_assert { }; } +#[allow(unused_macros)] +macro_rules! static_assert_range { + ($imm:ident, $min:literal..=$max:literal) => { + static_assert!( + $min <= $imm && $imm <= $max, + concat!( + stringify!($imm), + " is not in range ", + stringify!($min), + "-", + stringify!($max), + ) + ) + }; +} + #[allow(unused_macros)] macro_rules! static_assert_uimm_bits { ($imm:ident, $bits:expr) => { diff --git a/library/stdarch/crates/stdarch-gen-arm/src/context.rs b/library/stdarch/crates/stdarch-gen-arm/src/context.rs index 4d02a82b89661..85342a1804854 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/context.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/context.rs @@ -222,7 +222,7 @@ impl LocalContext { } => Ok(Expression::MacroCall( "static_assert_range".to_string(), format!( - "{variable}, {min}, {max}", + "{variable}, {min}..={max}", min = range.start(), max = range.end() ), @@ -250,7 +250,7 @@ impl LocalContext { |bitsize| Ok(higher_limit / bitsize - 1))?; Ok(Expression::MacroCall( "static_assert_range".to_string(), - format!("{variable}, 0, {max}"), + format!("{variable}, 0..={max}"), )) } else { Err(format!( From 826ab8ba0ec76ba9ebb81f7fe10733fdb1944f40 Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 15 Jan 2026 15:53:53 +0000 Subject: [PATCH 35/64] core_arch: sve types Add the SVE types (without any of the generated intrinsics) and empty modules where the generated intrinsics 
will be. Enables the `adt_const_params` crate feature that the generated intrinsics will use. Co-authored-by: Jamie Cunliffe Co-authored-by: Luca Vizzarro Co-authored-by: Adam Gemmell Co-authored-by: Jacob Bramley --- .../crates/core_arch/src/aarch64/mod.rs | 8 + .../core_arch/src/aarch64/sve/generated.rs | 1 + .../crates/core_arch/src/aarch64/sve/mod.rs | 379 ++++++++++++++++++ .../core_arch/src/aarch64/sve2/generated.rs | 1 + .../crates/core_arch/src/aarch64/sve2/mod.rs | 17 + library/stdarch/crates/core_arch/src/lib.rs | 3 +- 6 files changed, 408 insertions(+), 1 deletion(-) create mode 100644 library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs create mode 100644 library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs create mode 100644 library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs create mode 100644 library/stdarch/crates/core_arch/src/aarch64/sve2/mod.rs diff --git a/library/stdarch/crates/core_arch/src/aarch64/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/mod.rs index d7295659c3c9a..9376e04b3b53a 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/mod.rs @@ -25,6 +25,14 @@ mod neon; #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub use self::neon::*; +mod sve; +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub use self::sve::*; + +mod sve2; +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub use self::sve2::*; + mod prefetch; #[unstable(feature = "stdarch_aarch64_prefetch", issue = "117217")] pub use self::prefetch::*; diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs new file mode 100644 index 0000000000000..8b137891791fe --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs @@ -0,0 +1 @@ + diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs 
b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs new file mode 100644 index 0000000000000..a3f70ab61c40f --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs @@ -0,0 +1,379 @@ +//! SVE intrinsics + +#![allow(non_camel_case_types)] + +// `generated.rs` has a `super::*` and this import is for that +use crate::intrinsics::{simd::*, *}; + +#[rustfmt::skip] +mod generated; +#[rustfmt::skip] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub use self::generated::*; + +use crate::{marker::ConstParamTy, mem::transmute}; + +pub(super) trait AsUnsigned { + type Unsigned; + unsafe fn as_unsigned(self) -> Self::Unsigned; +} + +pub(super) trait AsSigned { + type Signed; + unsafe fn as_signed(self) -> Self::Signed; +} + +/// Same as `Into` but with into being unsafe so that it can have the required `target_feature` +pub(super) trait SveInto: Sized { + unsafe fn sve_into(self) -> T; +} + +macro_rules! impl_sve_type { + ($(($v:vis, $elem_type:ty, $name:ident, $elt:literal))*) => ($( + #[doc = concat!("Scalable vector of type ", stringify!($elem_type))] + #[derive(Clone, Copy, Debug)] + #[rustc_scalable_vector($elt)] + #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] + $v struct $name($elem_type); + )*) +} + +macro_rules! 
impl_sve_tuple_type { + ($(($v:vis, $vec_type:ty, $elt:tt, $name:ident))*) => ($( + impl_sve_tuple_type!(@ ($v, $vec_type, $elt, $name)); + )*); + (@ ($v:vis, $vec_type:ty, 2, $name:ident)) => ( + #[doc = concat!("Two-element tuple of scalable vectors of type ", stringify!($vec_type))] + #[derive(Clone, Copy, Debug)] + #[rustc_scalable_vector] + #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] + $v struct $name($vec_type, $vec_type); + ); + (@ ($v:vis, $vec_type:ty, 3, $name:ident)) => ( + #[doc = concat!("Three-element tuple of scalable vectors of type ", stringify!($vec_type))] + #[derive(Clone, Copy, Debug)] + #[rustc_scalable_vector] + #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] + $v struct $name($vec_type, $vec_type, $vec_type); + ); + (@ ($v:vis, $vec_type:ty, 4, $name:ident)) => ( + #[doc = concat!("Four-element tuple of scalable vectors of type ", stringify!($vec_type))] + #[derive(Clone, Copy, Debug)] + #[rustc_scalable_vector] + #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] + $v struct $name($vec_type, $vec_type, $vec_type, $vec_type); + ); +} + +macro_rules! impl_sign_conversions_sv { + ($(($signed:ty, $unsigned:ty))*) => ($( + impl AsUnsigned for $signed { + type Unsigned = $unsigned; + + #[inline] + #[target_feature(enable = "sve")] + unsafe fn as_unsigned(self) -> $unsigned { + transmute_unchecked(self) + } + } + + impl AsSigned for $unsigned { + type Signed = $signed; + + #[inline] + #[target_feature(enable = "sve")] + unsafe fn as_signed(self) -> $signed { + transmute_unchecked(self) + } + } + )*) +} + +macro_rules! 
impl_sign_conversions { + ($(($signed:ty, $unsigned:ty))*) => ($( + impl AsUnsigned for $signed { + type Unsigned = $unsigned; + + #[inline] + #[target_feature(enable = "sve")] + unsafe fn as_unsigned(self) -> $unsigned { + transmute(self) + } + } + + impl AsSigned for $unsigned { + type Signed = $signed; + + #[inline] + #[target_feature(enable = "sve")] + unsafe fn as_signed(self) -> $signed { + transmute(self) + } + } + )*) +} + +/// LLVM requires the predicate lane count to be the same as the lane count +/// it's working with. However the ACLE only defines one bool type and the +/// instruction set doesn't have this distinction. As a result we have to +/// create these internal types so we can match the LLVM signature. Each of +/// these internal types can be converted to the public `svbool_t` type and +/// the `svbool_t` type can be converted into these. +macro_rules! impl_internal_sve_predicate { + ($(($name:ident, $elt:literal))*) => ($( + impl_sve_type! { + (pub(super), bool, $name, $elt) + } + + impl SveInto for $name { + #[inline] + #[target_feature(enable = "sve")] + unsafe fn sve_into(self) -> svbool_t { + #[allow(improper_ctypes)] + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = concat!("llvm.aarch64.sve.convert.to.svbool.nxv", $elt, "i1") + )] + fn convert_to_svbool(b: $name) -> svbool_t; + } + unsafe { convert_to_svbool(self) } + } + } + + #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] + impl SveInto<$name> for svbool_t { + #[inline] + #[target_feature(enable = "sve")] + unsafe fn sve_into(self) -> $name { + #[allow(improper_ctypes)] + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = concat!("llvm.aarch64.sve.convert.from.svbool.nxv", $elt, "i1") + )] + fn convert_from_svbool(b: svbool_t) -> $name; + } + unsafe { convert_from_svbool(self) } + } + } + )*) +} + +impl_sve_type! 
{ + (pub, bool, svbool_t, 16) + + (pub, i8, svint8_t, 16) + (pub, u8, svuint8_t, 16) + + (pub, i16, svint16_t, 8) + (pub, u16, svuint16_t, 8) + (pub, f32, svfloat32_t, 4) + (pub, i32, svint32_t, 4) + (pub, u32, svuint32_t, 4) + (pub, f64, svfloat64_t, 2) + (pub, i64, svint64_t, 2) + (pub, u64, svuint64_t, 2) + + // Internal types: + (pub(super), i8, nxv2i8, 2) + (pub(super), i8, nxv4i8, 4) + (pub(super), i8, nxv8i8, 8) + + (pub(super), i16, nxv2i16, 2) + (pub(super), i16, nxv4i16, 4) + + (pub(super), i32, nxv2i32, 2) + + (pub(super), u8, nxv2u8, 2) + (pub(super), u8, nxv4u8, 4) + (pub(super), u8, nxv8u8, 8) + + (pub(super), u16, nxv2u16, 2) + (pub(super), u16, nxv4u16, 4) + + (pub(super), u32, nxv2u32, 2) +} + +impl_sve_tuple_type! { + (pub, svint8_t, 2, svint8x2_t) + (pub, svuint8_t, 2, svuint8x2_t) + (pub, svint16_t, 2, svint16x2_t) + (pub, svuint16_t, 2, svuint16x2_t) + (pub, svfloat32_t, 2, svfloat32x2_t) + (pub, svint32_t, 2, svint32x2_t) + (pub, svuint32_t, 2, svuint32x2_t) + (pub, svfloat64_t, 2, svfloat64x2_t) + (pub, svint64_t, 2, svint64x2_t) + (pub, svuint64_t, 2, svuint64x2_t) + + (pub, svint8_t, 3, svint8x3_t) + (pub, svuint8_t, 3, svuint8x3_t) + (pub, svint16_t, 3, svint16x3_t) + (pub, svuint16_t, 3, svuint16x3_t) + (pub, svfloat32_t, 3, svfloat32x3_t) + (pub, svint32_t, 3, svint32x3_t) + (pub, svuint32_t, 3, svuint32x3_t) + (pub, svfloat64_t, 3, svfloat64x3_t) + (pub, svint64_t, 3, svint64x3_t) + (pub, svuint64_t, 3, svuint64x3_t) + + (pub, svint8_t, 4, svint8x4_t) + (pub, svuint8_t, 4, svuint8x4_t) + (pub, svint16_t, 4, svint16x4_t) + (pub, svuint16_t, 4, svuint16x4_t) + (pub, svfloat32_t, 4, svfloat32x4_t) + (pub, svint32_t, 4, svint32x4_t) + (pub, svuint32_t, 4, svuint32x4_t) + (pub, svfloat64_t, 4, svfloat64x4_t) + (pub, svint64_t, 4, svint64x4_t) + (pub, svuint64_t, 4, svuint64x4_t) +} + +impl_sign_conversions! 
{ + (i8, u8) + (i16, u16) + (i32, u32) + (i64, u64) + (*const i8, *const u8) + (*const i16, *const u16) + (*const i32, *const u32) + (*const i64, *const u64) + (*mut i8, *mut u8) + (*mut i16, *mut u16) + (*mut i32, *mut u32) + (*mut i64, *mut u64) +} + +impl_sign_conversions_sv! { + (svint8_t, svuint8_t) + (svint16_t, svuint16_t) + (svint32_t, svuint32_t) + (svint64_t, svuint64_t) + + (svint8x2_t, svuint8x2_t) + (svint16x2_t, svuint16x2_t) + (svint32x2_t, svuint32x2_t) + (svint64x2_t, svuint64x2_t) + + (svint8x3_t, svuint8x3_t) + (svint16x3_t, svuint16x3_t) + (svint32x3_t, svuint32x3_t) + (svint64x3_t, svuint64x3_t) + + (svint8x4_t, svuint8x4_t) + (svint16x4_t, svuint16x4_t) + (svint32x4_t, svuint32x4_t) + (svint64x4_t, svuint64x4_t) + + // Internal types: + (nxv2i8, nxv2u8) + (nxv4i8, nxv4u8) + (nxv8i8, nxv8u8) + + (nxv2i16, nxv2u16) + (nxv4i16, nxv4u16) + + (nxv2i32, nxv2u32) +} + +impl_internal_sve_predicate! { + (svbool2_t, 2) + (svbool4_t, 4) + (svbool8_t, 8) +} + +/// Patterns returned by a `PTRUE` +#[repr(i32)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, ConstParamTy)] +#[non_exhaustive] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub enum svpattern { + /// Activate the largest power-of-two number of elements that is less than the vector length + SV_POW2 = 0, + /// Activate the first element + SV_VL1 = 1, + /// Activate the first two elements + SV_VL2 = 2, + /// Activate the first three elements + SV_VL3 = 3, + /// Activate the first four elements + SV_VL4 = 4, + /// Activate the first five elements + SV_VL5 = 5, + /// Activate the first six elements + SV_VL6 = 6, + /// Activate the first seven elements + SV_VL7 = 7, + /// Activate the first eight elements + SV_VL8 = 8, + /// Activate the first sixteen elements + SV_VL16 = 9, + /// Activate the first thirty-two elements + SV_VL32 = 10, + /// Activate the first sixty-four elements + SV_VL64 = 11, + /// Activate the first one-hundred-and-twenty-eight 
elements + SV_VL128 = 12, + /// Activate the first two-hundred-and-fifty-six elements + SV_VL256 = 13, + /// Activate the largest multiple-of-four number of elements that is less than the vector length + SV_MUL4 = 29, + /// Activate the largest multiple-of-three number of elements that is less than the vector + /// length + SV_MUL3 = 30, + /// Activate all elements + SV_ALL = 31, +} + +/// Addressing mode for prefetch intrinsics - allows the specification of the expected access +/// kind (read or write), the cache level to load the data, the data retention policy +/// (temporal or streaming) +#[repr(i32)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, ConstParamTy)] +#[non_exhaustive] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub enum svprfop { + /// Temporal fetch of the addressed location for reading to the L1 cache (i.e. allocate in + /// cache normally) + SV_PLDL1KEEP = 0, + /// Streaming fetch of the addressed location for reading to the L1 cache (i.e. memory only + /// used once) + SV_PLDL1STRM = 1, + /// Temporal fetch of the addressed location for reading to the L2 cache (i.e. allocate in + /// cache normally) + SV_PLDL2KEEP = 2, + /// Streaming fetch of the addressed location for reading to the L2 cache (i.e. memory only + /// used once) + SV_PLDL2STRM = 3, + /// Temporal fetch of the addressed location for reading to the L3 cache (i.e. allocate in + /// cache normally) + SV_PLDL3KEEP = 4, + /// Streaming fetch of the addressed location for reading to the L3 cache (i.e. memory only + /// used once) + SV_PLDL3STRM = 5, + /// Temporal fetch of the addressed location for writing to the L1 cache (i.e. allocate in + /// cache normally) + SV_PSTL1KEEP = 8, + /// Temporal fetch of the addressed location for writing to the L1 cache (i.e. memory only + /// used once) + SV_PSTL1STRM = 9, + /// Temporal fetch of the addressed location for writing to the L2 cache (i.e. 
allocate in + /// cache normally) + SV_PSTL2KEEP = 10, + /// Temporal fetch of the addressed location for writing to the L2 cache (i.e. memory only + /// used once) + SV_PSTL2STRM = 11, + /// Temporal fetch of the addressed location for writing to the L3 cache (i.e. allocate in + /// cache normally) + SV_PSTL3KEEP = 12, + /// Temporal fetch of the addressed location for writing to the L3 cache (i.e. memory only + /// used once) + SV_PSTL3STRM = 13, +} + +#[cfg(test)] +#[path = "ld_st_tests_aarch64.rs"] +mod ld_st_tests; diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs new file mode 100644 index 0000000000000..8b137891791fe --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs @@ -0,0 +1 @@ + diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve2/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/sve2/mod.rs new file mode 100644 index 0000000000000..acf9070214571 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve2/mod.rs @@ -0,0 +1,17 @@ +//! 
SVE2 intrinsics + +#![allow(non_camel_case_types)] + +// `generated.rs` has a `super::*` and this import is for that +use super::sve::*; +use crate::intrinsics::*; + +#[rustfmt::skip] +mod generated; +#[rustfmt::skip] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub use self::generated::*; + +#[cfg(test)] +#[path = "ld_st_tests_aarch64.rs"] +mod ld_st_tests; diff --git a/library/stdarch/crates/core_arch/src/lib.rs b/library/stdarch/crates/core_arch/src/lib.rs index 9255994e5ee81..f2f19eba26705 100644 --- a/library/stdarch/crates/core_arch/src/lib.rs +++ b/library/stdarch/crates/core_arch/src/lib.rs @@ -40,7 +40,8 @@ const_cmp, const_eval_select, maybe_uninit_as_bytes, - movrs_target_feature + movrs_target_feature, + min_adt_const_params )] #![cfg_attr(test, feature(test, abi_vectorcall, stdarch_internal))] #![deny(clippy::missing_inline_in_public_items)] From 78ccc9277080f2dfec7003801aa2362d2ccdab0f Mon Sep 17 00:00:00 2001 From: David Wood Date: Fri, 16 Jan 2026 12:30:36 +0000 Subject: [PATCH 36/64] gen-arm: use `sve_into` instead of `into` `Into::into` can't be used here because the implementations can't have the required target feature, so `SveInto` needs to be introduced and written by the generator --- library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs index e20ab6779cfcd..18a638a0390bf 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs @@ -630,7 +630,7 @@ impl LLVMLink { match (scope, kind.base_type()) { (Argument, Some(Sized(Bool, bitsize))) if *bitsize != 8 => { - Ok(convert("into", arg)) + Ok(convert("sve_into", arg)) } (Argument, Some(Sized(UInt, _) | Unsized(UInt))) => { if ctx.global.auto_llvm_sign_conversion { @@ -663,7 +663,7 @@ impl LLVMLink { let fn_call = 
Expression::FnCall(fn_call); match return_type_conversion { - Some(Bool) => Ok(convert("into", fn_call)), + Some(Bool) => Ok(convert("sve_into", fn_call)), Some(UInt) if ctx.global.auto_llvm_sign_conversion => { Ok(convert("as_unsigned", fn_call)) } From a7d4530a985d688aca5d0c003df46a1fca05cd66 Mon Sep 17 00:00:00 2001 From: David Wood Date: Wed, 4 Mar 2026 14:16:40 +0000 Subject: [PATCH 37/64] gen-arm: correct renamed `from_exposed_addr` link `core::ptr::from_exposed_addr` was renamed to `core::ptr::with_exposed_provenance` and so this link needs updated. --- library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs index 18a638a0390bf..5d38d45ca6900 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs @@ -871,8 +871,8 @@ impl fmt::Display for UnsafetyComment { Self::NoProvenance(arg) => write!( f, "Addresses passed in `{arg}` lack provenance, so this is similar to using a \ - `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before \ - using it." + `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane \ + before using it." ), Self::UnpredictableOnFault => write!( f, From ca5032f50fc5acf936f64ab252ae97ecff8c0c60 Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 15 Jan 2026 15:53:53 +0000 Subject: [PATCH 38/64] gen-arm: add sve intrinsic definitions Thousands of lines of SVE intrinsic definitions.. 
Co-authored-by: Jamie Cunliffe Co-authored-by: Luca Vizzarro Co-authored-by: Adam Gemmell Co-authored-by: Jacob Bramley --- .../stdarch-gen-arm/spec/sve/aarch64.spec.yml | 5199 +++++++++++++++++ .../spec/sve2/aarch64.spec.yml | 3196 ++++++++++ 2 files changed, 8395 insertions(+) create mode 100644 library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml create mode 100644 library/stdarch/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml new file mode 100644 index 0000000000000..1fad8bb371f90 --- /dev/null +++ b/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml @@ -0,0 +1,5199 @@ +arch_cfgs: + - arch_name: aarch64 + target_feature: [sve] + llvm_prefix: llvm.aarch64.sve + +uses_neon_types: true +auto_llvm_sign_conversion: true +generate_load_store_tests: true + +# `#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]` +sve-unstable: &sve-unstable + FnCall: [unstable, ['feature = "stdarch_aarch64_sve"', 'issue= "145052"']] + +intrinsics: + - name: svacge[{_n}_{type}] + attr: [*sve-unstable] + doc: Absolute compare greater than or equal to + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64] + assert_instr: [facge] + n_variant_op: op2 + compose: + - LLVMLink: { name: "facge.{sve_type}" } + + - name: svacgt[{_n}_{type}] + attr: [*sve-unstable] + doc: Absolute compare greater than + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64] + assert_instr: [facgt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "facgt.{sve_type}" } + + - name: svacle[{_n}_{type}] + attr: [*sve-unstable] + doc: Absolute compare less than or equal to + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64] + assert_instr: 
[facge] + n_variant_op: op2 + compose: + - FnCall: ["svacge_{type}", [$pg, $op2, $op1]] + + - name: svaclt[{_n}_{type}] + attr: [*sve-unstable] + doc: Absolute compare less than + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64] + assert_instr: [facgt] + n_variant_op: op2 + compose: + - FnCall: ["svacgt_{type}", [$pg, $op2, $op1]] + + - name: svcadd[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Complex add with rotate + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [90, 270] }] + assert_instr: [[fcadd, "IMM_ROTATION = 90"]] + zeroing_method: { select: op1 } + compose: + - LLVMLink: + name: fcadd.{sve_type} + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$pg, $op1, $op2, $IMM_ROTATION]] + + - name: svcmla[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Complex multiply-add with rotate + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }] + assert_instr: [[fcmla, "IMM_ROTATION = 90"]] + zeroing_method: { select: op1 } + compose: + - LLVMLink: + name: fcmla.{sve_type} + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$pg, $op1, $op2, $op3, $IMM_ROTATION]] + + - name: svcmla_lane[_{type}] + attr: [*sve-unstable] + doc: Complex multiply-add with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [f32] + static_defs: ["const IMM_INDEX: i32", "const IMM_ROTATION: i32"] + constraints: + 
- variable: IMM_INDEX + range: { match_size: "{type}", default: [0, 1], halfword: [0, 3] } + - { variable: IMM_ROTATION, any_values: [0, 90, 180, 270] } + assert_instr: [[fcmla, "IMM_INDEX = 0, IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: fcmla.lane.x.{sve_type} + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_index: i32" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX, $IMM_ROTATION]] + + - name: svadd[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Add + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind.f}add"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}add.{sve_type}" } + + - name: svqsub[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating subtract + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.su}qsub"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}qsub.x.{sve_type}" } + + - name: svcnt[_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Count nonzero bits + arguments: + ["inactive: {sve_type[1]}", "pg: {predicate[0]}", "op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [f32, u32] + - [f64, u64] + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + zeroing_method: { drop: inactive } + assert_instr: [cnt] + compose: + - LLVMLink: { name: "cnt.{sve_type[0]}" } + + - name: svcls[_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Count leading sign bits + arguments: + ["inactive: {sve_type[1]}", "pg: {predicate[0]}", "op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: [[i8, u8], [i16, u16], [i32, u32], [i64, u64]] + zeroing_method: { drop: inactive } + assert_instr: [cls] + 
compose: + - LLVMLink: { name: "cls.{sve_type[0]}" } + + - name: svclz[_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Count leading zero bits + arguments: + ["inactive: {sve_type[1]}", "pg: {predicate[0]}", "op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + zeroing_method: { drop: inactive } + assert_instr: [clz] + compose: + - LLVMLink: { name: "clz.{sve_type[0]}" } + + - name: svext{size_literal[1]}[_{type[0]}]{_mxz} + attr: [*sve-unstable] + substitutions: + sign_or_zero: + match_kind: "{type[0]}" + default: Sign + unsigned: Zero + kind_literal: { match_kind: "{type[0]}", default: s, unsigned: u } + doc: "{sign_or_zero}-extend the low {size[1]} bits" + arguments: + ["inactive: {sve_type[0]}", "pg: {predicate[0]}", "op: {sve_type[0]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + zeroing_method: { drop: inactive } + assert_instr: ["{type_kind[0].su}xt{size_literal[1]}"] + compose: + - LLVMLink: + name: "{type_kind[0].su}xt{size_literal[1]}.{sve_type[0]}" + + - name: svsqrt[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Square root + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { drop: inactive } + assert_instr: [fsqrt] + compose: + - LLVMLink: { name: "fsqrt.{sve_type}" } + + - name: svcmpeq[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare equal to + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [{ float: fcmeq, default: cmpeq }] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}cmpeq.{sve_type}" } + + - name: svcmpeq_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare equal to + arguments: + ["pg: 
{predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + types: + - [[i8, i16, i32], i64] + assert_instr: [cmpeq] + n_variant_op: op2 + compose: + - LLVMLink: { name: "cmpeq.wide.{sve_type[0]}" } + + - name: svcmpge[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare greater than or equal to + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [{ float: fcmge, default: cmpge, unsigned: cmphs }] + n_variant_op: op2 + compose: + - MatchKind: + - "{type}" + - default: + LLVMLink: { name: "{type_kind.f}cmpge.{sve_type}" } + unsigned: + LLVMLink: { name: "cmphs.{sve_type}" } + + - name: svcmpge_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare greater than or equal to + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + n_variant_op: op2 + types: + - [[i8, i16, i32], i64] + - [[u8, u16, u32], u64] + assert_instr: [{ default: cmpge, unsigned: cmphs }] + compose: + - MatchKind: + - "{type[0]}" + - default: + LLVMLink: { name: "cmpge.wide.{sve_type[0]}" } + unsigned: + LLVMLink: { name: "cmphs.wide.{sve_type[0]}" } + + - name: svcmpgt[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare greater than + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [{ float: fcmgt, default: cmpgt, unsigned: cmphi }] + n_variant_op: op2 + compose: + - MatchKind: + - "{type}" + - default: + LLVMLink: { name: "{type_kind.f}cmpgt.{sve_type}" } + unsigned: + LLVMLink: { name: "cmphi.{sve_type}" } + + - name: svcmpgt_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare greater than + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + types: + - [[i8, i16, i32], i64] + - [[u8, 
u16, u32], u64] + assert_instr: [{ default: cmpgt, unsigned: cmphi }] + n_variant_op: op2 + compose: + - MatchKind: + - "{type[0]}" + - default: + LLVMLink: { name: "cmpgt.wide.{sve_type[0]}" } + unsigned: + LLVMLink: { name: "cmphi.wide.{sve_type[0]}" } + + - name: svcmple[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare less than or equal to + arguments: ["pg: svbool_t", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "svbool_t" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [{ float: fcmge, default: cmpge, unsigned: cmphs }] + n_variant_op: op2 + compose: + - FnCall: ["svcmpge_{type}", [$pg, $op2, $op1]] + + - name: svcmple_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare less than or equal to + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + types: + - [[i8, i16, i32], i64] + - [[u8, u16, u32], u64] + assert_instr: [{ default: cmple, unsigned: cmpls }] + n_variant_op: op2 + compose: + - MatchKind: + - "{type[0]}" + - default: + LLVMLink: { name: "cmple.wide.{sve_type[0]}" } + unsigned: + LLVMLink: { name: "cmpls.wide.{sve_type[0]}" } + + - name: svcmplt[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare less than + arguments: ["pg: svbool_t", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "svbool_t" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [{ float: fcmgt, default: cmpgt, unsigned: cmphi }] + n_variant_op: op2 + compose: + - FnCall: ["svcmpgt_{type}", [$pg, $op2, $op1]] + + - name: svcmplt_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare less than + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + types: + - [[i8, i16, i32], i64] + - [[u8, u16, u32], u64] + assert_instr: [{ default: cmplt, unsigned: cmplo }] + n_variant_op: op2 + compose: + - MatchKind: + - "{type[0]}" + - default: + LLVMLink: { name: "cmplt.wide.{sve_type[0]}" } + 
unsigned: + LLVMLink: { name: "cmplo.wide.{sve_type[0]}" } + + - name: svcmpne[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare not equal to + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [{ float: fcmne, default: cmpne }] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}cmpne.{sve_type}" } + + - name: svcmpne_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare not equal to + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + types: [[[i8, i16, i32], i64]] + assert_instr: [cmpne] + n_variant_op: op2 + compose: + - LLVMLink: { name: "cmpne.wide.{sve_type[0]}" } + + - name: svcmpuo[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare unordered with + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64] + assert_instr: [fcmuo] + n_variant_op: op2 + compose: + - LLVMLink: { name: "fcmpuo.{sve_type}" } + + - name: svcnt{size_literal} + attr: [*sve-unstable] + doc: Count the number of {size}-bit elements in a vector + arguments: [] + return_type: u64 + types: [i8, i16, i32, i64] + assert_instr: + - default: { byte: rdvl, halfword: cnth, default: cntw, doubleword: cntd } + compose: + - FnCall: ["svcnt{size_literal}_pat", [], ["{{ svpattern::SV_ALL }}"]] + + - name: svcnt{size_literal}_pat + attr: [*sve-unstable] + doc: Count the number of {size}-bit elements in a vector + arguments: [] + static_defs: ["const PATTERN: svpattern"] + return_type: u64 + assert_instr: + - [rdvl, "PATTERN = {{ svpattern::SV_ALL }}"] + - ["cnt{size_literal}", "PATTERN = {{ svpattern::SV_MUL4 }}"] + types: [i8] + compose: + - LLVMLink: + name: cnt{size_literal} + arguments: ["pattern: svpattern"] + - FnCall: ["{llvm_link}", [$PATTERN]] + + - name: svcnt{size_literal}_pat + attr: [*sve-unstable] + doc: Count the number 
of {size}-bit elements in a vector + arguments: [] + static_defs: ["const PATTERN: svpattern"] + return_type: u64 + assert_instr: [["cnt{size_literal}", "PATTERN = {{ svpattern::SV_ALL }}"]] + types: [i16, i32, i64] + compose: + - LLVMLink: + name: cnt{size_literal} + arguments: ["pattern: svpattern"] + - FnCall: ["{llvm_link}", [$PATTERN]] + + - name: svlen[_{type}] + attr: [*sve-unstable] + doc: Count the number of elements in a full vector + arguments: ["_op: {sve_type}"] + return_type: "u64" + types: [i8, u8, i16, u16, i32, u32, f32, i64, u64, f64] + assert_instr: [{ default: { default: "cnt{size_literal}", byte: rdvl } }] + compose: + - FnCall: ["svcnt{size_literal}", []] + + - name: svdup[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a scalar value + arguments: ["op: {type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [mov] + compose: + - LLVMLink: { name: "dup.x.{sve_type}" } + + - name: svdup[_n]_{type}{_mxz} + attr: [*sve-unstable] + doc: Broadcast a scalar value + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { drop: inactive } + assert_instr: [mov] + compose: + - LLVMLink: { name: "dup.{sve_type}" } + + - name: svdup[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a scalar value + arguments: ["op: bool"] + return_type: "{predicate}" + types: [b8, b16, b32, b64] + assert_instr: [sbfx, whilelo] + compose: + - LLVMLink: { name: "dup.x.{sve_type}" } + + - name: svdup_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Broadcast a scalar value + arguments: ["data: {sve_type[0]}", "index: {type[1]}"] + return_type: "{sve_type[0]}" + types: + - [f32, u32] + - [f64, u64] + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + assert_instr: [tbl] + compose: + - FnCall: + - svtbl_{type[0]} + - - $data + - FnCall: 
["svdup_n_{type[1]}", [$index]] + + - name: svdupq_lane[_{type}] + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + arguments: ["data: {sve_type}", "index: u64"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [tbl] + compose: + - LLVMLink: { name: "dupq.lane.{sve_type}" } + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + arguments: + - "x0: {type}" + - "x1: {type}" + - "x2: {type}" + - "x3: {type}" + - "x4: {type}" + - "x5: {type}" + - "x6: {type}" + - "x7: {type}" + - "x8: {type}" + - "x9: {type}" + - "x10: {type}" + - "x11: {type}" + - "x12: {type}" + - "x13: {type}" + - "x14: {type}" + - "x15: {type}" + return_type: "{sve_type}" + types: [i8, u8] + assert_instr: [] + compose: + - LLVMLink: + name: llvm.experimental.vector.insert.{sve_type}.{neon_type} + arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"] + - Let: + - op + - FnCall: + - "{llvm_link}" + - - FnCall: ["svundef_{type}", [], [], true] + - FnCall: + - "crate::mem::transmute" + - - - $x0 + - $x1 + - $x2 + - $x3 + - $x4 + - $x5 + - $x6 + - $x7 + - $x8 + - $x9 + - $x10 + - $x11 + - $x12 + - $x13 + - $x14 + - $x15 + - 0 + - FnCall: ["svdupq_lane_{type}", [$op, 0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + types: [b8] + arguments: + - "x0: bool" + - "x1: bool" + - "x2: bool" + - "x3: bool" + - "x4: bool" + - "x5: bool" + - "x6: bool" + - "x7: bool" + - "x8: bool" + - "x9: bool" + - "x10: bool" + - "x11: bool" + - "x12: bool" + - "x13: bool" + - "x14: bool" + - "x15: bool" + return_type: "svbool_t" + assert_instr: [] + compose: + - Let: + - op1 + - FnCall: + - svdupq_n_s8 + - - CastAs: [$x0, i8] + - CastAs: [$x1, i8] + - CastAs: [$x2, i8] + - CastAs: [$x3, i8] + - CastAs: [$x4, i8] + - CastAs: [$x5, i8] + - CastAs: [$x6, i8] + - CastAs: [$x7, i8] + - CastAs: [$x8, i8] + - CastAs: [$x9, i8] + - CastAs: [$x10, i8] + - CastAs: [$x11, 
i8] + - CastAs: [$x12, i8] + - CastAs: [$x13, i8] + - CastAs: [$x14, i8] + - CastAs: [$x15, i8] + - FnCall: + - svcmpne_wide_s8 + - - FnCall: [svptrue_b8, []] + - $op1 + - FnCall: [svdup_n_s64, [0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + arguments: + - "x0: {type}" + - "x1: {type}" + - "x2: {type}" + - "x3: {type}" + - "x4: {type}" + - "x5: {type}" + - "x6: {type}" + - "x7: {type}" + return_type: "{sve_type}" + types: [i16, u16] + assert_instr: [] + compose: + - LLVMLink: + name: llvm.experimental.vector.insert.{sve_type}.{neon_type} + arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"] + - Let: + - op + - FnCall: + - "{llvm_link}" + - - FnCall: ["svundef_{type}", [], [], true] + - FnCall: + - "crate::mem::transmute" + - - [$x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7] + - 0 + - FnCall: ["svdupq_lane_{type}", [$op, 0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + types: [b16] + arguments: + - "x0: bool" + - "x1: bool" + - "x2: bool" + - "x3: bool" + - "x4: bool" + - "x5: bool" + - "x6: bool" + - "x7: bool" + return_type: svbool_t + assert_instr: [] + compose: + - Let: + - op1 + - FnCall: + - svdupq_n_s16 + - - CastAs: [$x0, i16] + - CastAs: [$x1, i16] + - CastAs: [$x2, i16] + - CastAs: [$x3, i16] + - CastAs: [$x4, i16] + - CastAs: [$x5, i16] + - CastAs: [$x6, i16] + - CastAs: [$x7, i16] + - FnCall: + - svcmpne_wide_s16 + - - FnCall: [svptrue_b16, []] + - $op1 + - FnCall: [svdup_n_s64, [0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + arguments: ["x0: {type}", "x1: {type}", "x2: {type}", "x3: {type}"] + return_type: "{sve_type}" + types: [f32, i32, u32] + assert_instr: [] + compose: + - LLVMLink: + name: llvm.experimental.vector.insert.{sve_type}.{neon_type} + arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"] + - Let: + - op + - FnCall: + - "{llvm_link}" + - - FnCall: ["svundef_{type}", [], [], 
true] + - FnCall: ["crate::mem::transmute", [[$x0, $x1, $x2, $x3]]] + - 0 + - FnCall: ["svdupq_lane_{type}", [$op, 0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + types: [b32] + arguments: ["x0: bool", "x1: bool", "x2: bool", "x3: bool"] + return_type: "svbool_t" + assert_instr: [] + compose: + - Let: + - op1 + - FnCall: + - svdupq_n_s32 + - - CastAs: [$x0, i32] + - CastAs: [$x1, i32] + - CastAs: [$x2, i32] + - CastAs: [$x3, i32] + - FnCall: + - svcmpne_wide_s32 + - - FnCall: [svptrue_b32, []] + - $op1 + - FnCall: [svdup_n_s64, [0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + arguments: ["x0: {type}", "x1: {type}"] + return_type: "{sve_type}" + types: [f64, i64, u64] + assert_instr: [] + compose: + - LLVMLink: + name: llvm.experimental.vector.insert.{sve_type}.{neon_type} + arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"] + - Let: + - op + - FnCall: + - "{llvm_link}" + - - FnCall: ["svundef_{type}", [], [], true] + - FnCall: ["crate::mem::transmute", [[$x0, $x1]]] + - 0 + - FnCall: ["svdupq_lane_{type}", [$op, 0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + types: [b64] + arguments: ["x0: bool", "x1: bool"] + return_type: "svbool_t" + assert_instr: [] + compose: + - Let: + - op1 + - FnCall: [svdupq_n_s64, [CastAs: [$x0, i64], CastAs: [$x1, i64]]] + - FnCall: + - svcmpne_s64 + - - FnCall: [svptrue_b64, []] + - $op1 + - FnCall: [svdup_n_s64, [0]] + + - name: svcreate2[_{type}] + attr: [*sve-unstable] + doc: Create a tuple of two vectors + arguments: ["x0: {sve_type}", "x1: {sve_type}"] + return_type: "{sve_type_x2}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_create2", [$x0, $x1], [], true] + + - name: svcreate3[_{type}] + attr: [*sve-unstable] + doc: Create a tuple of three vectors + arguments: ["x0: 
{sve_type}", "x1: {sve_type}", "x2: {sve_type}"] + return_type: "{sve_type_x3}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_create3", [$x0, $x1, $x2], [], true] + + - name: svcreate4[_{type}] + attr: [*sve-unstable] + doc: Create a tuple of four vectors + arguments: + ["x0: {sve_type}", "x1: {sve_type}", "x2: {sve_type}", "x3: {sve_type}"] + return_type: "{sve_type_x4}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_create4", [$x0, $x1, $x2, $x3], [], true] + + - name: svundef_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized vector + arguments: [] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["svdup_n_{type}", ["0"]] + + - name: svundef_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized vector + arguments: [] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [] + compose: + - FnCall: ["svdup_n_{type}", ["0{type}"]] + + - name: svundef2_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of two vectors + arguments: [] + return_type: "{sve_type_x2}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: + - "svcreate2_{type}" + - - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + + - name: svundef2_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of two vectors + arguments: [] + return_type: "{sve_type_x2}" + types: [f32, f64] + assert_instr: [] + compose: + - FnCall: + - "svcreate2_{type}" + - - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + + - name: svundef3_{type} + attr: 
[*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of three vectors + arguments: [] + return_type: "{sve_type_x3}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: + - "svcreate3_{type}" + - - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + + - name: svundef3_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of three vectors + arguments: [] + return_type: "{sve_type_x3}" + types: [f32, f64] + assert_instr: [] + compose: + - FnCall: + - "svcreate3_{type}" + - - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + + - name: svundef4_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of four vectors + arguments: [] + return_type: "{sve_type_x4}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: + - "svcreate4_{type}" + - - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + + - name: svundef4_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of four vectors + arguments: [] + return_type: "{sve_type_x4}" + types: [f32, f64] + assert_instr: [] + compose: + - FnCall: + - "svcreate4_{type}" + - - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + + - name: svindex_{type} + attr: [*sve-unstable] + doc: Create linear series + arguments: ["base: {type}", "step: {type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [index] + compose: + - LLVMLink: { name: "index.{sve_type}" } + + - name: 
svget2[_{type}] + attr: [*sve-unstable] + doc: Extract one vector from a tuple of two vectors + arguments: ["tuple: {sve_type_x2}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 1] }] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_get", [$tuple], ["_", "_", "{{IMM_INDEX}}"], true] + + - name: svget3[_{type}] + attr: [*sve-unstable] + doc: Extract one vector from a tuple of three vectors + arguments: ["tuple: {sve_type_x3}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 2] }] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_get", [$tuple], ["_", "_", "{{IMM_INDEX}}"], true] + + - name: svget4[_{type}] + attr: [*sve-unstable] + doc: Extract one vector from a tuple of four vectors + arguments: ["tuple: {sve_type_x4}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 3] }] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_get", [$tuple], ["_", "_", "{{IMM_INDEX}}"], true] + + - name: svset2[_{type}] + attr: [*sve-unstable] + doc: Change one vector in a tuple of two vectors + arguments: ["tuple: {sve_type_x2}", "x: {sve_type}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 1] }] + return_type: "{sve_type_x2}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_set", [$tuple, $x], ["_", "_", "{{IMM_INDEX}}"], true] + + - name: svset3[_{type}] + attr: [*sve-unstable] + doc: Change one vector in a tuple of three vectors + arguments: 
["tuple: {sve_type_x3}", "x: {sve_type}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 2] }] + return_type: "{sve_type_x3}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_set", [$tuple, $x], ["_", "_", "{{IMM_INDEX}}"], true] + + - name: svset4[_{type}] + attr: [*sve-unstable] + doc: Change one vector in a tuple of four vectors + arguments: ["tuple: {sve_type_x4}", "x: {sve_type}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 3] }] + return_type: "{sve_type_x4}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_set", [$tuple, $x], ["_", "_", "{{IMM_INDEX}}"], true] + + - name: svzip1[_{type}] + attr: [*sve-unstable] + doc: Interleave elements from low halves of two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [zip1] + compose: + - LLVMLink: { name: "zip1.{sve_type}" } + + - name: svzip1_{type} + attr: [*sve-unstable] + doc: Interleave elements from low halves of two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [zip1] + compose: + - LLVMLink: { name: "zip1.{sve_type}" } + + - name: svzip1q[_{type}] + attr: [*sve-unstable] + doc: Interleave quadwords from low halves of two inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [zip1] + compose: + - LLVMLink: { name: "zip1q.{sve_type}" } + + - name: svzip2[_{type}] + attr: [*sve-unstable] + doc: Interleave elements from high halves of two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + 
return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [zip2] + compose: + - LLVMLink: { name: "zip2.{sve_type}" } + + - name: svzip2_{type} + attr: [*sve-unstable] + doc: Interleave elements from high halves of two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [zip2] + compose: + - LLVMLink: { name: "zip2.{sve_type}" } + + - name: svzip2q[_{type}] + attr: [*sve-unstable] + doc: Interleave quadwords from high halves of two inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [zip2] + compose: + - LLVMLink: { name: "zip2q.{sve_type}" } + + - name: svuzp1[_{type}] + attr: [*sve-unstable] + doc: Concatenate even elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [uzp1] + compose: + - LLVMLink: { name: "uzp1.{sve_type}" } + + - name: svuzp1_{type} + attr: [*sve-unstable] + doc: Concatenate even elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [uzp1] + compose: + - LLVMLink: { name: "uzp1.{sve_type}" } + + - name: svuzp1q[_{type}] + attr: [*sve-unstable] + doc: Concatenate even quadwords from two inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [uzp1] + compose: + - LLVMLink: { name: "uzp1q.{sve_type}" } + + - name: svuzp2[_{type}] + attr: [*sve-unstable] + doc: Concatenate odd elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + 
assert_instr: [uzp2] + compose: + - LLVMLink: { name: "uzp2.{sve_type}" } + + - name: svuzp2_{type} + attr: [*sve-unstable] + doc: Concatenate odd elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [uzp2] + compose: + - LLVMLink: { name: "uzp2.{sve_type}" } + + - name: svuzp2q[_{type}] + attr: [*sve-unstable] + doc: Concatenate odd quadwords from two inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [uzp2] + compose: + - LLVMLink: { name: "uzp2q.{sve_type}" } + + - name: svtrn1[_{type}] + attr: [*sve-unstable] + doc: Interleave even elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [trn1] + compose: + - LLVMLink: { name: "trn1.{sve_type}" } + + - name: svtrn1_{type} + attr: [*sve-unstable] + doc: Interleave even elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [trn1] + compose: + - LLVMLink: { name: "trn1.{sve_type}" } + + - name: svtrn1q[_{type}] + attr: [*sve-unstable] + doc: Interleave even quadwords from two inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [trn1] + compose: + - LLVMLink: { name: "trn1q.{sve_type}" } + + - name: svtrn2[_{type}] + attr: [*sve-unstable] + doc: Interleave odd elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [trn2] + compose: + - LLVMLink: { name: "trn2.{sve_type}" } + + - name: svtrn2_{type} + attr: 
[*sve-unstable] + doc: Interleave odd elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [trn2] + compose: + - LLVMLink: { name: "trn2.{sve_type}" } + + - name: svtrn2q[_{type}] + attr: [*sve-unstable] + doc: Interleave odd quadwords from two inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [trn2] + compose: + - LLVMLink: { name: "trn2q.{sve_type}" } + + - name: svrev[_{type}] + attr: [*sve-unstable] + doc: Reverse all elements + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [rev] + compose: + - LLVMLink: { name: "rev.{sve_type}" } + + - name: svrev_{type} + attr: [*sve-unstable] + doc: Reverse all elements + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [rev] + compose: + - LLVMLink: { name: "rev.{sve_type}" } + + - name: svrevb[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reverse bytes within elements + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [i16, i32, i64, u16, u32, u64] + zeroing_method: { drop: "inactive" } + assert_instr: [revb] + compose: + - LLVMLink: { name: "revb.{sve_type}" } + + - name: svrevh[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reverse halfwords within elements + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [i32, i64, u32, u64] + zeroing_method: { drop: "inactive" } + assert_instr: [revh] + compose: + - LLVMLink: { name: "revh.{sve_type}" } + + - name: svrevw[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reverse words within elements + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" 
+ types: [i64, u64] + zeroing_method: { drop: "inactive" } + assert_instr: [revw] + compose: + - LLVMLink: { name: "revw.{sve_type}" } + + - name: svrbit[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reverse bits + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { drop: "inactive" } + assert_instr: [rbit] + compose: + - LLVMLink: { name: "rbit.{sve_type}" } + + - name: svext[_{type}] + attr: [*sve-unstable] + doc: Extract vector from pair of vectors + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, sve_max_elems_type: "{type}" }] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [[ext, "IMM3 = 1"]] + compose: + - LLVMLink: + name: ext.{sve_type} + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svsplice[_{type}] + attr: [*sve-unstable] + doc: Splice two vectors under predicate control + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [splice] + compose: + - LLVMLink: { name: "splice.{sve_type}" } + + - name: svinsr[_n_{type}] + attr: [*sve-unstable] + doc: Insert scalar in shifted vector + arguments: ["op1: {sve_type}", "op2: {type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [insr] + compose: + - LLVMLink: { name: "insr.{sve_type}" } + + - name: svld1[_{type}] + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld1{size_literal}"] + test: { load: 0 
} + compose: + - LLVMLink: { name: "ld1.{sve_type}" } + + - name: svld1_vnum[_{type}] + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld1{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svld1_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld1_gather_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[1]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["ld1{size_literal[0]}"] + test: { load: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.index.{sve_type[1]}" + doubleword: + LLVMLink: + name: "ld1.gather.index.{sve_type[1]}" + + - name: svld1_gather_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[1]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["ld1{size_literal[0]}"] + test: { load: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.{sve_type[1]}" + doubleword: + LLVMLink: + name: "ld1.gather.{sve_type[1]}" + + - name: svld1_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - 
pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ld1{size_literal[0]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ld1.gather.scalar.offset.{sve_type[1]}.{sve_type[0]}" + + - name: svld1_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ld1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - "svld1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svld1_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ld1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - "svld1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]] + + - name: svld1s{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [i32, u32], i16] + - [[i64, u64], [i64, u64], [i16, i32]] + assert_instr: ["ld1s{size_literal[2]}"] + test: { load: 2 } + 
compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ld1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + + - name: svld1u{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [u32, i32], u16] + - [[i64, u64], [u64, i64], [u16, u32]] + assert_instr: ["ld1{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ld1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svld1s{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [i32, u32], [i8, i16]] + - [[i64, u64], [i64, u64], [i8, i16, i32]] + assert_instr: ["ld1s{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}" + return_type: 
"{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ld1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + + - name: svld1u{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [u32, i32], [u8, u16]] + - [[i64, u64], [u64, i64], [u8, u16, u32]] + assert_instr: ["ld1{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ld1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svld1s{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["ld1s{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ld1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + + - name: 
svld1u{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [u32, i32], [u8, u16]] + - [u64, [u64, i64], [u8, u16, u32]] + assert_instr: ["ld1{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ld1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svld1s{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["ld1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svld1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svld1u{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [u8, u16]] + - [u64, [i64, u64], [u8, u16, u32]] + assert_instr: ["ld1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svld1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: 
svld1s{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], i16] + - [u64, [i64, u64], [i16, i32]] + assert_instr: ["ld1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svld1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svld1u{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], u16] + - [u64, [i64, u64], [u16, u32]] + assert_instr: ["ld1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svld1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svldnt1[_{type}] + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ldnt1{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ldnt1.{sve_type}" } + + - name: svldnt1_vnum[_{type}] + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + - non_temporal + arguments: ["pg: {predicate}", "base: 
*{type}", "vnum: i64"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ldnt1{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svldnt1_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld1s{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ld1s{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ld1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base]] + + - name: svld1u{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ld1{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ld1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base]] + - [Type: "{sve_type[0] as {type[1]}}", _] + + - name: svld1s{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset_vnum: 
predicated + - dereference: predicated + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ld1s{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svld1s{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld1u{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ld1{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svld1u{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld2[_{type}] + attr: [*sve-unstable] + doc: Load two-element tuples into two vectors + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type_x2}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld2{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ld2.sret.{sve_type}" } + + - name: svld2_vnum[_{type}] + attr: [*sve-unstable] + doc: Load two-element tuples into two vectors + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type_x2}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: 
["ld2{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svld2_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld3[_{type}] + attr: [*sve-unstable] + doc: Load three-element tuples into three vectors + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type_x3}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld3{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ld3.sret.{sve_type}" } + + - name: svld3_vnum[_{type}] + attr: [*sve-unstable] + doc: Load three-element tuples into three vectors + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type_x3}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld3{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svld3_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld4[_{type}] + attr: [*sve-unstable] + doc: Load four-element tuples into four vectors + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type_x4}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld4{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ld4.sret.{sve_type}" } + + - name: svld4_vnum[_{type}] + attr: [*sve-unstable] + doc: Load four-element tuples into four vectors + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type_x4}" + types: [f32, f64, 
i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld4{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svld4_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld1rq[_{type}] + attr: [*sve-unstable] + doc: Load and replicate 128 bits of data + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld1rq{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ld1rq.{sve_type}" } + + - name: svld1ro[_{type}] + attr: [*sve-unstable] + doc: Load and replicate 256 bits of data + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + target_features: [f64mm] + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld1ro{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ld1ro.{sve_type}" } + + - name: svldnf1[_{type}] + attr: [*sve-unstable] + doc: Unextended load, non-faulting + safety: + unsafe: + - pointer_offset: predicated_non_faulting + - dereference: predicated_non_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ldnf1{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ldnf1.{sve_type}" } + + - name: svldnf1_vnum[_{type}] + attr: [*sve-unstable] + doc: Unextended load, non-faulting + safety: + unsafe: + - pointer_offset_vnum: predicated_non_faulting + - dereference: predicated_non_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, 
u8, u16, u32, u64] + assert_instr: ["ldnf1{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svldnf1_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldnf1s{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and sign-extend, non-faulting + safety: + unsafe: + - pointer_offset: predicated_non_faulting + - dereference: predicated_non_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ldnf1s{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldnf1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base]] + + - name: svldnf1u{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend, non-faulting + safety: + unsafe: + - pointer_offset: predicated_non_faulting + - dereference: predicated_non_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ldnf1{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldnf1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base]] + - [Type: "{sve_type[0] as {type[1]}}", _] + + - name: svldnf1s{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and 
sign-extend, non-faulting + safety: + unsafe: + - pointer_offset_vnum: predicated_non_faulting + - dereference: predicated_non_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ldnf1s{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldnf1s{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldnf1u{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend, non-faulting + safety: + unsafe: + - pointer_offset_vnum: predicated_non_faulting + - dereference: predicated_non_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ldnf1{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldnf1u{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldff1[_{type}] + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ldff1{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ldff1.{sve_type}" } + + - name: svldff1_vnum[_{type}] + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: 
+ unsafe: + - pointer_offset_vnum: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ldff1{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svldff1_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldff1s{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ldff1s{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldff1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base]] + + - name: svldff1u{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ldff1{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldff1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: 
"{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base]] + - [Type: "{sve_type[0] as {type[1]}}", _] + + - name: svldff1s{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset_vnum: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ldff1s{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldff1s{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldff1u{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset_vnum: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ldff1{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldff1u{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldff1_gather_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: + ["pg: 
{predicate[0]}", "base: *{type[1]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["ldff1{size_literal[0]}"] + test: { load: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.index.{sve_type[1]}" + doubleword: + LLVMLink: + name: "ldff1.gather.index.{sve_type[1]}" + + - name: svldff1_gather_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: + ["pg: {predicate[0]}", "base: *{type[1]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["ldff1{size_literal[0]}"] + test: { load: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.{sve_type[1]}" + doubleword: + LLVMLink: + name: "ldff1.gather.{sve_type[1]}" + + - name: svldff1_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldff1{size_literal[0]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldff1.gather.scalar.offset.{sve_type[1]}.{sve_type[0]}" + + - name: svldff1_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - 
no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldff1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldff1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldff1_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldff1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldff1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]] + + - name: svldff1s{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [i32, u32], i16] + - [[i64, u64], [i64, u64], [i16, i32]] + assert_instr: ["ldff1s{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldff1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + + 
- name: svldff1u{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [u32, i32], u16] + - [[i64, u64], [u64, i64], [u16, u32]] + assert_instr: ["ldff1{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldff1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldff1s{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [i32, u32], [i8, i16]] + - [[i64, u64], [i64, u64], [i8, i16, i32]] + assert_instr: ["ldff1s{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldff1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + + 
- name: svldff1u{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [u32, i32], [u8, u16]] + - [[i64, u64], [u64, i64], [u8, u16, u32]] + assert_instr: ["ldff1{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldff1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldff1s{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["ldff1s{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ldff1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + + - name: svldff1u{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, 
first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [u32, i32], [u8, u16]] + - [u64, [u64, i64], [u8, u16, u32]] + assert_instr: ["ldff1{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ldff1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldff1s{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + doc: Load {size[2]}-bit data and sign-extend, first-faulting + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["ldff1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldff1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldff1u{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [u8, u16]] + - [u64, [i64, u64], [u8, u16, u32]] + assert_instr: ["ldff1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - 
"svldff1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldff1s{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], i16] + - [u64, [i64, u64], [i16, i32]] + assert_instr: ["ldff1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldff1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svldff1u{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], u16] + - [u64, [i64, u64], [u16, u32]] + assert_instr: ["ldff1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldff1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svrdffr_z + attr: [*sve-unstable] + doc: Read FFR, returning predicate of succesfully loaded elements + arguments: ["pg: svbool_t"] + return_type: svbool_t + assert_instr: [rdffr] + compose: + - LLVMLink: { name: "rdffr.z" } + + - name: svrdffr + attr: [*sve-unstable] + doc: Read FFR, returning predicate of succesfully loaded elements + arguments: [] + return_type: svbool_t + assert_instr: [rdffr] 
+ compose: + - FnCall: [svrdffr_z, [FnCall: [svptrue_b8, []]]] + + - name: svsetffr + attr: [*sve-unstable] + doc: Initialize the first-fault register to all-true + arguments: [] + assert_instr: [setffr] + compose: + - LLVMLink: { name: "setffr" } + + - name: svwrffr + attr: [*sve-unstable] + doc: Write to the first-fault register + arguments: ["op: svbool_t"] + assert_instr: [wrffr] + compose: + - LLVMLink: { name: "wrffr" } + + - name: svqinc{size_literal[1]}[_n_{type[0]}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type[1]}" + default: word + byte: byte + halfword: halfword + doubleword: doubleword + doc: Saturating increment by number of {textual_size} elements + arguments: ["op: {type[0]}"] + static_defs: ["const IMM_FACTOR: i32"] + return_type: "{type[0]}" + types: + - [[i32, i64, u32, u64], [i8, i16, i32, i64]] + assert_instr: + - ["{type_kind[0].su}qinc{size_literal[1]}", "IMM_FACTOR = 1"] + compose: + - FnCall: + - "svqinc{size_literal[1]}_pat_n_{type[0]}" + - [$op] + - ["{{svpattern::SV_ALL}}", $IMM_FACTOR] + + - name: svqinc{size_literal[1]}_pat[_n_{type[0]}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type[1]}" + default: word + byte: byte + halfword: halfword + doubleword: doubleword + doc: Saturating increment by number of {textual_size} elements + arguments: ["op: {type[0]}"] + static_defs: ["const PATTERN: svpattern", "const IMM_FACTOR: i32"] + constraints: [{ variable: IMM_FACTOR, range: [1, 16] }] + return_type: "{type[0]}" + types: + - [[i32, i64, u32, u64], [i8, i16, i32, i64]] + assert_instr: + - - "{type_kind[0].su}qinc{size_literal[1]}" + - "PATTERN = {{svpattern::SV_ALL}}, IMM_FACTOR = 1" + compose: + - LLVMLink: + name: "{type_kind[0].su}qinc{size_literal[1]}.n{size[0]}" + arguments: ["op: {type[0]}", "pattern: svpattern", "imm_factor: i32"] + return_type: "{type[0]}" + - FnCall: ["{llvm_link}", [$op, $PATTERN, $IMM_FACTOR]] + + - name: svqinc{size_literal}[_{type}] + attr: 
[*sve-unstable] + substitutions: + textual_size: + match_size: "{type}" + default: word + halfword: halfword + doubleword: doubleword + doc: Saturating increment by number of {textual_size} elements + arguments: ["op: {sve_type}"] + static_defs: ["const IMM_FACTOR: i32"] + return_type: "{sve_type}" + types: [i16, u16, i32, u32, i64, u64] + assert_instr: [["{type_kind.su}qinc{size_literal}", "IMM_FACTOR = 1"]] + compose: + - FnCall: + - "svqinc{size_literal}_pat_{type}" + - [$op] + - ["{{svpattern::SV_ALL}}", $IMM_FACTOR] + + - name: svqinc{size_literal}_pat[_{type}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type}" + default: word + halfword: halfword + doubleword: doubleword + doc: Saturating increment by number of {textual_size} elements + arguments: ["op: {sve_type}"] + static_defs: ["const PATTERN: svpattern", "const IMM_FACTOR: i32"] + constraints: [{ variable: IMM_FACTOR, range: [1, 16] }] + return_type: "{sve_type}" + types: [i16, u16, i32, u32, i64, u64] + assert_instr: + - - "{type_kind.su}qinc{size_literal}" + - "PATTERN = {{svpattern::SV_ALL}}, IMM_FACTOR = 1" + compose: + - LLVMLink: + name: "{type_kind.su}qinc{size_literal}.{sve_type}" + arguments: ["op: {sve_type}", "pattern: svpattern", "imm_factor: i32"] + return_type: "{sve_type}" + - FnCall: ["{llvm_link}", [$op, $PATTERN, $IMM_FACTOR]] + + - name: svqdec{size_literal[1]}[_n_{type[0]}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type[1]}" + default: word + byte: byte + halfword: halfword + doubleword: doubleword + doc: Saturating decrement by number of {textual_size} elements + arguments: ["op: {type[0]}"] + static_defs: ["const IMM_FACTOR: i32"] + return_type: "{type[0]}" + types: + - [[i32, i64, u32, u64], [i8, i16, i32, i64]] + assert_instr: + - ["{type_kind[0].su}qdec{size_literal[1]}", "IMM_FACTOR = 1"] + compose: + - FnCall: + - "svqdec{size_literal[1]}_pat_n_{type[0]}" + - [$op] + - ["{{svpattern::SV_ALL}}", $IMM_FACTOR] + + - name: 
svqdec{size_literal[1]}_pat[_n_{type[0]}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type[1]}" + default: word + byte: byte + halfword: halfword + doubleword: doubleword + doc: Saturating decrement by number of {textual_size} elements + arguments: ["op: {type[0]}"] + static_defs: ["const PATTERN: svpattern", "const IMM_FACTOR: i32"] + constraints: [{ variable: IMM_FACTOR, range: [1, 16] }] + return_type: "{type[0]}" + types: + - [[i32, i64, u32, u64], [i8, i16, i32, i64]] + assert_instr: + - - "{type_kind[0].su}qdec{size_literal[1]}" + - "PATTERN = {{svpattern::SV_ALL}}, IMM_FACTOR = 1" + compose: + - LLVMLink: + name: "{type_kind[0].su}qdec{size_literal[1]}.n{size[0]}" + arguments: ["op: {type[0]}", "pattern: svpattern", "imm_factor: i32"] + return_type: "{type[0]}" + - FnCall: ["{llvm_link}", [$op, $PATTERN, $IMM_FACTOR]] + + - name: svqdec{size_literal}[_{type}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type}" + default: word + halfword: halfword + doubleword: doubleword + doc: Saturating decrement by number of {textual_size} elements + arguments: ["op: {sve_type}"] + static_defs: ["const IMM_FACTOR: i32"] + return_type: "{sve_type}" + types: [i16, u16, i32, u32, i64, u64] + assert_instr: [["{type_kind.su}qdec{size_literal}", "IMM_FACTOR = 1"]] + compose: + - FnCall: + - "svqdec{size_literal}_pat_{type}" + - [$op] + - ["{{svpattern::SV_ALL}}", $IMM_FACTOR] + + - name: svqdec{size_literal}_pat[_{type}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type}" + default: word + halfword: halfword + doubleword: doubleword + doc: Saturating decrement by number of {textual_size} elements + arguments: ["op: {sve_type}"] + static_defs: ["const PATTERN: svpattern", "const IMM_FACTOR: i32"] + constraints: [{ variable: IMM_FACTOR, range: [1, 16] }] + return_type: "{sve_type}" + types: [i16, u16, i32, u32, i64, u64] + assert_instr: + - - "{type_kind.su}qdec{size_literal}" + - "PATTERN = 
{{svpattern::SV_ALL}}, IMM_FACTOR = 1" + compose: + - LLVMLink: + name: "{type_kind.su}qdec{size_literal}.{sve_type}" + arguments: ["op: {sve_type}", "pattern: svpattern", "imm_factor: i32"] + return_type: "{sve_type}" + - FnCall: ["{llvm_link}", [$op, $PATTERN, $IMM_FACTOR]] + + - name: svst1[_{type}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st1{size_literal}"] + test: { store: 0 } + compose: + - LLVMLink: + name: "st1.{sve_type}" + arguments: + - "data: {sve_type}" + - "pg: {predicate}" + - "ptr: *mut {type}" + - FnCall: ["{llvm_link}", [$data, $pg, $base]] + + - name: svst1_scatter_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "indices: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["st1{size_literal[0]}"] + test: { store: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "st1.scatter.{type_kind[0].su}xtw.index.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "indices: {sve_type[0]}" + doubleword: + LLVMLink: + name: "st1.scatter.index.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "indices: {sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $base, $indices]] + + - name: svst1_scatter_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" 
+ - "offsets: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["st1{size_literal[0]}"] + test: { store: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "st1.scatter.{type_kind[0].su}xtw.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "offsets: {sve_type[0]}" + doubleword: + LLVMLink: + name: "st1.scatter.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "offsets: {sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $base, $offsets]] + + - name: svst1_scatter[_{type[0]}base]_offset[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - "data: {sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["st1{size_literal[0]}"] + test: { store: 1 } + compose: + - LLVMLink: + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + name: "st1.scatter.scalar.offset.{sve_type[1]}.{sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $bases, $offset]] + + - name: svst1_scatter[_{type[0]}base_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + ["pg: {predicate[0]}", "bases: {sve_type[0]}", "data: {sve_type[1]}"] + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["st1{size_literal[0]}"] + test: { store: 1 } + compose: + - FnCall: + - "svst1_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + - $data + + - name: svst1_scatter[_{type[0]}base]_index[_{type[1]}] + attr: [*sve-unstable] 
+ doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "index: i64" + - "data: {sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["st1{size_literal[0]}"] + test: { store: 1 } + compose: + - FnCall: + - "svst1_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]] + - $data + + - name: svst1{size_literal[2]}_scatter_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "indices: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [[i32, u32], i32, i16] + - [[i32, u32], u32, u16] + - [[i64, u64], i64, [i16, i32]] + - [[i64, u64], u64, [u16, u32]] + assert_instr: ["st1{size_literal[2]}"] + test: { store: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "st1.scatter.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "indices: {sve_type[0]}" + doubleword: + LLVMLink: + name: "st1.scatter.index.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "indices: {sve_type[0]}" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::simd_cast", [$data]], $pg, $base, $indices] + + - name: svst1{size_literal[2]}_scatter_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + - "data: 
{sve_type[1]}" + types: + - [[i32, u32], i32, [i8, i16]] + - [[i32, u32], u32, [u8, u16]] + - [[i64, u64], i64, [i8, i16, i32]] + - [[i64, u64], u64, [u8, u16, u32]] + assert_instr: ["st1{size_literal[2]}"] + test: { store: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "st1.scatter.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + doubleword: + LLVMLink: + name: "st1.scatter.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::simd_cast", [$data]], $pg, $base, $offsets] + + - name: svst1{size_literal[2]}_scatter[_{type[0]}base]_offset[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - "data: {sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["st1{size_literal[2]}"] + test: { store: 2 } + compose: + - LLVMLink: + name: "st1.scatter.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::simd_cast", [$data]], $pg, $bases, $offset] + + - name: svst1{size_literal[2]}_scatter[_{type[0]}base_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + ["pg: {predicate[0]}", "bases: {sve_type[0]}", "data: {sve_type[1]}"] + types: + - 
[u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["st1{size_literal[2]}"] + test: { store: 2 } + compose: + - FnCall: + - "svst1{size_literal[2]}_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + - $data + + - name: svst1{size_literal[2]}_scatter[_{type[0]}base]_index[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "index: i64" + - "data: {sve_type[1]}" + types: + - [u32, [i32, u32], i16] + - [u64, [i64, u64], [i16, i32]] + assert_instr: ["st1{size_literal[2]}"] + test: { store: 2 } + compose: + - FnCall: + - "svst1{size_literal[2]}_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + - $data + + - name: svstnt1[_{type}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["stnt1{size_literal}"] + test: { store: 0 } + compose: + - LLVMLink: + name: "stnt1.{sve_type}" + arguments: + - "data: {sve_type}" + - "pg: {predicate}" + - "ptr: *mut {type}" + - FnCall: ["{llvm_link}", [$data, $pg, $base]] + + - name: svstnt1_vnum[_{type}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate}", "base: *mut {type}", "vnum: i64", "data: {sve_type}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["stnt1{size_literal}"] + test: { store: 0 } + compose: + - FnCall: + - "svstnt1_{type}" + - - $pg + - MethodCall: + - $base + - offset + 
- - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svst1{size_literal[1]}[_{type[0]}] + attr: [*sve-unstable] + doc: Truncate to {size[1]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *mut {type[1]}", "data: {sve_type[0]}"] + types: + - [[i16, i32, i64], i8] + - [[u16, u32, u64], u8] + - [[i32, i64], i16] + - [[u32, u64], u16] + - [i64, i32] + - [u64, u32] + assert_instr: ["st1{size_literal[1]}"] + test: { store: 1 } + compose: + - LLVMLink: + name: "st1.{sve_type[0] as {type[1]}}" + arguments: + - "data: {sve_type[0] as {type[1]}}" + - "pg: {predicate[0]}" + - "ptr: *mut {type[1]}" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::simd_cast", [$data]], $pg, $base] + + - name: svst1{size_literal[1]}_vnum[_{type[0]}] + attr: [*sve-unstable] + doc: Truncate to {size[1]} bits and store + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "vnum: i64" + - "data: {sve_type[0]}" + types: + - [[i16, i32, i64], i8] + - [[u16, u32, u64], u8] + - [[i32, i64], i16] + - [[u32, u64], u16] + - [i64, i32] + - [u64, u32] + assert_instr: ["st1{size_literal[1]}"] + test: { store: 1 } + compose: + - FnCall: + - "svst1{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svst1_vnum[_{type}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate}", "base: *mut {type}", "vnum: i64", "data: {sve_type}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st1{size_literal}"] + test: { store: 0 } + compose: + - FnCall: + - 
"svst1_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svst2[_{type}] + attr: [*sve-unstable] + doc: Store two vectors into two-element tuples + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type_x2}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st2{size_literal}"] + test: { store: 0 } + compose: + - LLVMLink: + name: "st2.{sve_type}" + arguments: + - "data0: {sve_type}" + - "data1: {sve_type}" + - "pg: {predicate}" + - "ptr: *mut {type}" + - FnCall: + - "{llvm_link}" + - - FnCall: ["svget2_{type}", ["$data"], [0]] + - FnCall: ["svget2_{type}", ["$data"], [1]] + - "$pg" + - "$base" + + - name: svst2_vnum[_{type}] + attr: [*sve-unstable] + doc: Store two vectors into two-element tuples + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + - "pg: {predicate}" + - "base: *mut {type}" + - "vnum: i64" + - "data: {sve_type_x2}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st2{size_literal}"] + test: { store: 0 } + compose: + - FnCall: + - "svst2_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svst3[_{type}] + attr: [*sve-unstable] + doc: Store three vectors into three-element tuples + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type_x3}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st3{size_literal}"] + test: { store: 0 } + compose: + - LLVMLink: + name: "st3.{sve_type}" + arguments: + - "data0: {sve_type}" + - "data1: {sve_type}" + - "data2: {sve_type}" + - "pg: {predicate}" + - "ptr: 
*mut {type}" + - FnCall: + - "{llvm_link}" + - - FnCall: ["svget3_{type}", ["$data"], [0]] + - FnCall: ["svget3_{type}", ["$data"], [1]] + - FnCall: ["svget3_{type}", ["$data"], [2]] + - "$pg" + - "$base" + + - name: svst3_vnum[_{type}] + attr: [*sve-unstable] + doc: Store three vectors into three-element tuples + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: + - "pg: {predicate}" + - "base: *mut {type}" + - "vnum: i64" + - "data: {sve_type_x3}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st3{size_literal}"] + test: { store: 0 } + compose: + - FnCall: + - "svst3_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svst4[_{type}] + attr: [*sve-unstable] + doc: Store four vectors into four-element tuples + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type_x4}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st4{size_literal}"] + test: { store: 0 } + compose: + - LLVMLink: + name: "st4.{sve_type}" + arguments: + - "data0: {sve_type}" + - "data1: {sve_type}" + - "data2: {sve_type}" + - "data3: {sve_type}" + - "pg: {predicate}" + - "ptr: *mut {type}" + - FnCall: + - "{llvm_link}" + - - FnCall: ["svget4_{type}", ["$data"], [0]] + - FnCall: ["svget4_{type}", ["$data"], [1]] + - FnCall: ["svget4_{type}", ["$data"], [2]] + - FnCall: ["svget4_{type}", ["$data"], [3]] + - "$pg" + - "$base" + + - name: svst4_vnum[_{type}] + attr: [*sve-unstable] + doc: Store four vectors into four-element tuples + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: + - "pg: {predicate}" + - "base: *mut {type}" + - "vnum: i64" + - "data: {sve_type_x4}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + 
assert_instr: ["st4{size_literal}"] + test: { store: 0 } + compose: + - FnCall: + - "svst4_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svtbl[_{type[0]}] + attr: [*sve-unstable] + doc: Table lookup in single-vector table + arguments: ["data: {sve_type[0]}", "indices: {sve_type[1]}"] + return_type: "{sve_type[0]}" + assert_instr: [tbl] + types: + - [f32, u32] + - [f64, u64] + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + compose: + - LLVMLink: { name: "tbl.{sve_type[0]}" } + + - name: svwhilele_{type[1]}[_{type[0]}] + attr: [*sve-unstable] + doc: While incrementing scalar is less than or equal to + arguments: ["op1: {type[0]}", "op2: {type[0]}"] + return_type: "{sve_type[1]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + assert_instr: [{ default: whilele, unsigned: whilels }] + compose: + - MatchKind: + - "{type[0]}" + - default: { LLVMLink: { name: "whilele.{sve_type[1]}.{type[0]}" } } + unsigned: { LLVMLink: { name: "whilels.{sve_type[1]}.{type[0]}" } } + + - name: svwhilelt_{type[1]}[_{type[0]}] + attr: [*sve-unstable] + doc: While incrementing scalar is less than + arguments: ["op1: {type[0]}", "op2: {type[0]}"] + return_type: "{sve_type[1]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + assert_instr: [{ default: whilelt, unsigned: whilelo }] + compose: + - MatchKind: + - "{type[0]}" + - default: { LLVMLink: { name: "whilelt.{sve_type[1]}.{type[0]}" } } + unsigned: { LLVMLink: { name: "whilelo.{sve_type[1]}.{type[0]}" } } + + - name: svmax[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Maximum + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64, f32, f64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind}max"] + n_variant_op: op2 
+ compose: + - LLVMLink: { name: "{type_kind.fsu}max.{sve_type}" } + + - name: svmaxnm[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Maximum number + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { select: op1 } + assert_instr: [fmaxnm] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}maxnm.{sve_type}" } + + - name: svpfalse[_b] + attr: [*sve-unstable] + doc: Set all predicate elements to false + arguments: [] + return_type: "svbool_t" + assert_instr: [pfalse] + compose: + - FnCall: + - "svdupq_n_b8" + - - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + + - name: svptrue_pat_{type} + attr: [*sve-unstable] + doc: Set predicate elements to true + arguments: [] + static_defs: ["const PATTERN: svpattern"] + return_type: "{predicate}" + types: [b8, b16, b32, b64] + assert_instr: [[ptrue, "PATTERN = {{svpattern::SV_ALL}}"]] + compose: + - LLVMLink: + name: ptrue.{sve_type} + arguments: ["pattern: svpattern"] + - FnCall: ["{llvm_link}", [$PATTERN]] + + - name: svptrue_{type} + attr: [*sve-unstable] + doc: Set predicate elements to true + arguments: [] + return_type: "svbool_t" + types: [b8, b16, b32, b64] + assert_instr: [ptrue] + compose: + - FnCall: ["svptrue_pat_{type}", [], ["{{svpattern::SV_ALL}}"]] + + - name: svptest_any + attr: [*sve-unstable] + doc: Test whether any active element is true + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "bool" + assert_instr: [ptest] + compose: + - LLVMLink: { name: "ptest.any.nxv16i1" } + + - name: svptest_first + attr: [*sve-unstable] + doc: Test whether first active element is true + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "bool" + assert_instr: [ptest] + compose: + - LLVMLink: { name: "ptest.first.nxv16i1" } + + - name: svptest_last + attr: [*sve-unstable] + doc: Test whether 
last active element is true + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "bool" + assert_instr: [ptest] + compose: + - LLVMLink: { name: "ptest.last.nxv16i1" } + + - name: svpfirst[_b] + attr: [*sve-unstable] + doc: Set the first active predicate element to true + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "svbool_t" + assert_instr: [pfirst] + compose: + - LLVMLink: { name: "pfirst.nxv16i1" } + + - name: svpnext_{type} + attr: [*sve-unstable] + doc: Find next active predicate + arguments: ["pg: {predicate}", "op: {predicate}"] + return_type: "{predicate}" + types: [b8, b16, b32, b64] + assert_instr: [pnext] + compose: + - LLVMLink: { name: "pnext.{sve_type}" } + + - name: svbrkn[_b]_z + attr: [*sve-unstable] + doc: Propagate break to next partition + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: "svbool_t" + assert_instr: [brkn] + compose: + - LLVMLink: { name: "brkn.z.nxv16i1" } + + - name: svbrkb[_b]_z + attr: [*sve-unstable] + doc: Break before first true condition + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "svbool_t" + assert_instr: [brkb] + compose: + - LLVMLink: { name: "brkb.z.nxv16i1" } + + - name: svbrkb[_b]_m + attr: [*sve-unstable] + doc: Break before first true condition + arguments: ["inactive: svbool_t", "pg: svbool_t", "op: svbool_t"] + return_type: "svbool_t" + assert_instr: [brkb] + compose: + - LLVMLink: { name: "brkb.nxv16i1" } + + - name: svbrkpb[_b]_z + attr: [*sve-unstable] + doc: Break before first true condition, propagating from previous partition + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: "svbool_t" + assert_instr: [brkpb] + compose: + - LLVMLink: { name: "brkpb.z.nxv16i1" } + + - name: svbrka[_b]_z + attr: [*sve-unstable] + doc: Break after first true condition + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "svbool_t" + assert_instr: [brka] + compose: + - LLVMLink: { name: "brka.z.nxv16i1" } + + - name: 
svbrka[_b]_m + attr: [*sve-unstable] + doc: Break after first true condition + arguments: ["inactive: svbool_t", "pg: svbool_t", "op: svbool_t"] + return_type: "svbool_t" + assert_instr: [brka] + compose: + - LLVMLink: { name: "brka.nxv16i1" } + + - name: svbrkpa[_b]_z + attr: [*sve-unstable] + doc: Break after first true condition, propagating from previous partition + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: "svbool_t" + assert_instr: [brkpa] + compose: + - LLVMLink: { name: "brkpa.z.nxv16i1" } + + - name: svsel[_b] + attr: [*sve-unstable] + doc: Conditionally select elements + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: "svbool_t" + assert_instr: [sel] + compose: + - FnCall: ["simd_select", [$pg, $op1, $op2]] + + - name: svsel[_{type}] + attr: [*sve-unstable] + doc: Conditionally select elements + arguments: ["pg: svbool_t", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [sel] + compose: + - FnCall: + - "simd_select" + - - MatchSize: + - "{type}" + - { default: { MethodCall: [$pg, sve_into, []] }, byte: $pg } + - $op1 + - $op2 + - - MatchSize: + - "{type}" + - byte: svbool_t + halfword: svbool8_t + default: svbool4_t + doubleword: svbool2_t + - _ + + - name: svsub[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Subtract + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64, f32, f64] + assert_instr: ["{type_kind.f}sub"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}sub.{sve_type}" } + + - name: svsubr[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Subtract reversed + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64, f32, f64] + assert_instr: 
["{type_kind.f}subr"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}subr.{sve_type}" } + + - name: svcntp_{predicate} + attr: [*sve-unstable] + doc: Count set predicate bits + arguments: ["pg: {predicate}", "op: {predicate}"] + types: [b8, b16, b32, b64] + return_type: u64 + assert_instr: [cntp] + compose: + - LLVMLink: { name: "cntp.{predicate}" } + + - name: svcompact[_{type}] + attr: [*sve-unstable] + doc: Shuffle active elements of vector to the right and fill with zero + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i32, i64, u32, u64] + assert_instr: [compact] + compose: + - LLVMLink: { name: "compact.{sve_type}" } + + - name: svlasta[_{type}] + attr: [*sve-unstable] + doc: Extract element after last + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [lasta] + compose: + - LLVMLink: { name: "lasta.{sve_type}" } + + - name: svclasta[_{type}] + attr: [*sve-unstable] + doc: Conditionally extract element after last + arguments: ["pg: {predicate}", "fallback: {sve_type}", "data: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [clasta] + compose: + - LLVMLink: { name: "clasta.{sve_type}" } + + - name: svclasta[_n_{type}] + attr: [*sve-unstable] + doc: Conditionally extract element after last + arguments: ["pg: {predicate}", "fallback: {type}", "data: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [clasta] + compose: + - LLVMLink: { name: "clasta.n.{sve_type}" } + + - name: svlastb[_{type}] + attr: [*sve-unstable] + doc: Extract last element + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [lastb] + compose: + - LLVMLink: { name: 
"lastb.{sve_type}" } + + - name: svclastb[_{type}] + attr: [*sve-unstable] + doc: Conditionally extract last element + arguments: ["pg: {predicate}", "fallback: {sve_type}", "data: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [clastb] + compose: + - LLVMLink: { name: "clastb.{sve_type}" } + + - name: svclastb[_n_{type}] + attr: [*sve-unstable] + doc: Conditionally extract last element + arguments: ["pg: {predicate}", "fallback: {type}", "data: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [clastb] + compose: + - LLVMLink: { name: "clastb.n.{sve_type}" } + + - name: svqdecp[_{type}] + attr: [*sve-unstable] + doc: Saturating decrement by active element count + arguments: ["op: {sve_type}", "pg: {predicate}"] + return_type: "{sve_type}" + types: [i16, i32, i64, u16, u32, u64] + assert_instr: ["{type_kind.su}qdecp"] + compose: + - LLVMLink: { name: "{type_kind.su}qdecp.{sve_type}" } + + - name: svqdecp[_n_{type[0]}]_{type[1]} + attr: [*sve-unstable] + doc: Saturating decrement by active element count + arguments: ["op: {type[0]}", "pg: {sve_type[1]}"] + return_type: "{type[0]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + assert_instr: ["{type_kind[0].su}qdecp"] + compose: + - LLVMLink: { name: "{type_kind[0].su}qdecp.n{size[0]}.{sve_type[1]}" } + + - name: svqincp[_{type}] + attr: [*sve-unstable] + doc: Saturating increment by active element count + arguments: ["op: {sve_type}", "pg: {predicate}"] + return_type: "{sve_type}" + types: [i16, i32, i64, u16, u32, u64] + assert_instr: ["{type_kind.su}qincp"] + compose: + - LLVMLink: { name: "{type_kind.su}qincp.{sve_type}" } + + - name: svqincp[_n_{type[0]}]_{type[1]} + attr: [*sve-unstable] + doc: Saturating increment by active element count + arguments: ["op: {type[0]}", "pg: {sve_type[1]}"] + return_type: "{type[0]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + 
assert_instr: ["{type_kind[0].su}qincp"] + compose: + - LLVMLink: { name: "{type_kind[0].su}qincp.n{size[0]}.{sve_type[1]}" } + + - name: svtmad[_{type}] + attr: [*sve-unstable] + doc: Trigonometric multiply-add coefficient + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: [0, 7] }] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [[ftmad, "IMM3 = 0"]] + compose: + - LLVMLink: + name: "ftmad.x.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: + - "{llvm_link}" + - [op1, op2, IMM3] + + - name: svtsmul[_{type[0]}] + attr: [*sve-unstable] + doc: Trigonometric starting value + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [f32, u32] + - [f64, u64] + assert_instr: [ftsmul] + compose: + - LLVMLink: + name: "ftsmul.x.{sve_type[0]}" + + - name: svtssel[_{type[0]}] + attr: [*sve-unstable] + doc: Trigonometric select coefficient + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [f32, u32] + - [f64, u64] + assert_instr: [ftssel] + compose: + - LLVMLink: + name: "ftssel.x.{sve_type[0]}" + + - name: svprf{size_literal} + attr: [*sve-unstable] + safety: + unsafe: + - pointer_offset: predicated + substitutions: + textual_size: + match_size: "{type}" + default: words + byte: bytes + halfword: halfwords + doubleword: doublewords + doc: Prefetch {textual_size} + arguments: ["pg: {predicate}", "base: *T"] + static_defs: ["const OP: svprfop", T] + types: [b8, b16, b32, b64] + assert_instr: + - ["prf{size_literal}", "OP = {{svprfop::SV_PLDL1KEEP}}, T = i64"] + test: { load: 0 } + compose: + - LLVMLink: + name: "prf.{sve_type}" + arguments: + ["pg: {predicate}", "base: *crate::ffi::c_void", "op: svprfop"] + - FnCall: + - "{llvm_link}" + - - $pg + - CastAs: [$base, "*const crate::ffi::c_void"] + - $OP + + - name: svprf{size_literal}_vnum + attr: 
[*sve-unstable] + safety: + unsafe: + - pointer_offset_vnum: predicated + substitutions: + textual_size: + match_size: "{type}" + default: words + byte: bytes + halfword: halfwords + doubleword: doublewords + doc: Prefetch {textual_size} + arguments: ["pg: {predicate}", "base: *T", "vnum: i64"] + static_defs: ["const OP: svprfop", T] + types: [b8, b16, b32, b64] + assert_instr: + - ["prf{size_literal}", "OP = {{svprfop::SV_PLDL1KEEP}}, T = i64"] + test: { load: 0 } + compose: + - FnCall: + - "svprf{size_literal}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - - $OP + - _ + + - name: svprf{size_literal[1]}_gather_[{type[0]}]{index_or_offset} + attr: [*sve-unstable] + safety: + unsafe: + - pointer_offset: predicated + substitutions: + index_or_offset: + { match_size: "{type[1]}", default: "index", byte: "offset" } + indices_or_offsets: + { match_size: "{type[1]}", default: "indices", byte: "offsets" } + textual_size: + match_size: "{type[1]}" + default: words + byte: bytes + halfword: halfwords + doubleword: doublewords + doc: Prefetch {textual_size} + types: + - [[i32, u32, i64, u64], [i8, i16, i32, i64]] + arguments: + ["pg: {predicate[0]}", "base: *T", "{indices_or_offsets}: {sve_type[0]}"] + static_defs: ["const OP: svprfop", T] + assert_instr: + [["prf{size_literal[1]}", "OP = {{svprfop::SV_PLDL1KEEP}}, T = i64"]] + test: { load: 0 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "prf{size_literal[1]}.gather.{type_kind[0].su}xtw.index.{sve_type[0]}" + arguments: + - "pg: {predicate[0]}" + - "base: *crate::ffi::c_void" + - "{indices_or_offsets}: {sve_type[0]}" + - "op: svprfop" + doubleword: + LLVMLink: + name: "prf{size_literal[1]}.gather.index.{sve_type[0]}" + arguments: + - "pg: {predicate[0]}" + - "base: *crate::ffi::c_void" + - "{indices_or_offsets}: {sve_type[0]}" + - "op: svprfop" + - FnCall: + - "{llvm_link}" + - - $pg + 
- CastAs: [$base, "*const crate::ffi::c_void"] + - "${indices_or_offsets}" + - $OP + + - name: svprf{size_literal[1]}_gather[_{type[0]}base] + attr: [*sve-unstable] + safety: + unsafe: + - pointer_offset: predicated + - no_provenance: bases + substitutions: + textual_size: + match_size: "{type[1]}" + default: words + byte: bytes + halfword: halfwords + doubleword: doublewords + doc: Prefetch {textual_size} + types: + - [[u32, u64], [i8, i16, i32, i64]] + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + static_defs: ["const OP: svprfop"] + assert_instr: [["prf{size_literal[1]}", "OP = {{svprfop::SV_PLDL1KEEP}}"]] + test: { load: 0 } + compose: + - LLVMLink: + name: "prf{size_literal[1]}.gather.scalar.offset.{sve_type[0]}" + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "index: i64" + - "op: svprfop" + - FnCall: ["{llvm_link}", [$pg, $bases, 0, $OP]] + + - name: svprf{size_literal[1]}_gather[_{type[0]}base]_{index_or_offset} + attr: [*sve-unstable] + safety: + unsafe: + - pointer_offset: predicated + - no_provenance: bases + substitutions: + index_or_offset: + { match_size: "{type[1]}", default: "index", byte: "offset" } + textual_size: + match_size: "{type[1]}" + default: words + byte: bytes + halfword: halfwords + doubleword: doublewords + doc: Prefetch {textual_size} + types: + - [[u32, u64], [i8, i16, i32, i64]] + arguments: + ["pg: {predicate[0]}", "bases: {sve_type[0]}", "{index_or_offset}: i64"] + static_defs: ["const OP: svprfop"] + assert_instr: [["prfb", "OP = {{svprfop::SV_PLDL1KEEP}}"]] + test: { load: 0 } + compose: + - LLVMLink: + name: "prf{size_literal[1]}.gather.scalar.offset.{sve_type[0]}" + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "{index_or_offset}: i64" + - "op: svprfop" + - FnCall: + - "{llvm_link}" + - - $pg + - $bases + - MatchSize: + - "{type[1]}" + - byte: $offset + halfword: { MethodCall: [$index, unchecked_shl, [1]] } + default: { MethodCall: [$index, unchecked_shl, [2]] } + 
doubleword: { MethodCall: [$index, unchecked_shl, [3]] } + - $OP + + - name: svcvt_{type[0]}[_{type[1]}]{_mxz} + attr: [*sve-unstable] + doc: Floating-point convert + arguments: + ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[f32, f64], [i32, u32, i64, u64]] + zeroing_method: { drop: inactive } + substitutions: + convert_from: { match_kind: "{type[1]}", default: s, unsigned: u } + assert_instr: ["{convert_from}cvtf"] + compose: + - LLVMLink: + name: "{convert_from}cvtf.{type[0]}{type[1]}" + + - name: svcvt_{type[0]}[_{type[1]}]{_mxz} + attr: [*sve-unstable] + doc: Floating-point convert + arguments: + ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i32, u32, i64, u64], [f32, f64]] + zeroing_method: { drop: inactive } + substitutions: + convert_to: { match_kind: "{type[0]}", default: s, unsigned: u } + assert_instr: ["fcvtz{convert_to}"] + compose: + - LLVMLink: { name: "fcvtz{convert_to}.{type[0]}{type[1]}" } + + - name: svcvt_{type[0]}[_{type[1]}]{_mxz} + attr: [*sve-unstable] + doc: Floating-point convert + arguments: + ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, f64], [f64, f32]] + zeroing_method: { drop: inactive } + assert_instr: [fcvt] + compose: + - LLVMLink: { name: "fcvt.{type[0]}{type[1]}" } + + - name: svreinterpret_{type[0]}[_{type[1]}] + attr: [*sve-unstable] + doc: Reinterpret vector contents + arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + assert_instr: [] + types: + - - [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + - [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + compose: + - FnCall: ["crate::intrinsics::transmute_unchecked", [$op], [], true] + + - name: svrinta[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round to nearest, ties away from zero + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: 
{sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frinta] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frinta.{sve_type}" } + + - name: svrinti[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round using current rounding mode (inexact) + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frinti] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frinti.{sve_type}" } + + - name: svrintm[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round towards -∞ + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frintm] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frintm.{sve_type}" } + + - name: svrintn[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round to nearest, ties to even + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frintn] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frintn.{sve_type}" } + + - name: svrintp[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round towards +∞ + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frintp] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frintp.{sve_type}" } + + - name: svrintx[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round using current rounding mode (exact) + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frintx] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frintx.{sve_type}" } + + - name: svrintz[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round towards zero + arguments: ["inactive: {sve_type}", "pg: 
{sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frintz] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frintz.{sve_type}" } + + - name: svabd[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Absolute difference + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f64, f32, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind}abd"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind}abd.{sve_type}" } + + - name: svabs[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Absolute value + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64] + assert_instr: ["{type_kind.f}abs"] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "{type_kind.f}abs.{sve_type}" } + + - name: svand[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Bitwise AND + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [and] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + zeroing_method: { select: op1 } + compose: + - LLVMLink: { name: "and.{sve_type}" } + + - name: svandv[_{type}] + attr: [*sve-unstable] + doc: Bitwise AND reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + assert_instr: [andv] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + compose: + - LLVMLink: { name: "andv.{sve_type}" } + + - name: svand[_b]_z + attr: [*sve-unstable] + doc: Bitwise AND + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [and] + compose: + - LLVMLink: { name: "and.z.nxv16i1" } + + - name: svmov[_b]_z + attr: [*sve-unstable] + doc: Move + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: svbool_t + assert_instr: [mov] + compose: + - 
FnCall: ["svand_b_z", [$pg, $op, $op]] + + - name: svbic[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Bitwise clear + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [bic] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + zeroing_method: { select: op1 } + compose: + - LLVMLink: { name: "bic.{sve_type}" } + + - name: svbic[_b]_z + attr: [*sve-unstable] + doc: Bitwise clear + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [bic] + compose: + - LLVMLink: { name: "bic.z.nxv16i1" } + + - name: sveor[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Bitwise exclusive OR + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [eor] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + zeroing_method: { select: op1 } + compose: + - LLVMLink: { name: "eor.{sve_type}" } + + - name: sveorv[_{type}] + attr: [*sve-unstable] + doc: Bitwise exclusive OR reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + assert_instr: [eorv] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + compose: + - LLVMLink: { name: "eorv.{sve_type}" } + + - name: sveor[_b]_z + attr: [*sve-unstable] + doc: Bitwise exclusive OR + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [eor] + compose: + - LLVMLink: { name: "eor.z.nxv16i1" } + + - name: svnot[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Bitwise invert + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [not] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "not.{sve_type}" } + + - name: svnot[_b]_z + attr: [*sve-unstable] + doc: Bitwise invert + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: 
svbool_t + assert_instr: [not] + compose: + - FnCall: ["sveor_b_z", [$pg, $op, $pg]] + + - name: svcnot[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Logically invert boolean condition + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [cnot] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "cnot.{sve_type}" } + + - name: svnand[_b]_z + attr: [*sve-unstable] + doc: Bitwise NAND + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [nand] + compose: + - LLVMLink: { name: "nand.z.nxv16i1" } + + - name: svnor[_b]_z + attr: [*sve-unstable] + doc: Bitwise NOR + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [nor] + compose: + - LLVMLink: { name: "nor.z.nxv16i1" } + + - name: svorr[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Bitwise inclusive OR + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [orr] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + zeroing_method: { select: op1 } + compose: + - LLVMLink: { name: "orr.{sve_type}" } + + - name: svorv[_{type}] + attr: [*sve-unstable] + doc: Bitwise inclusive OR reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + assert_instr: [orv] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + compose: + - LLVMLink: { name: "orv.{sve_type}" } + + - name: svorr[_b]_z + attr: [*sve-unstable] + doc: Bitwise inclusive OR + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [orr] + compose: + - LLVMLink: { name: "orr.z.nxv16i1" } + + - name: svorn[_b]_z + attr: [*sve-unstable] + doc: Bitwise inclusive OR, inverting second argument + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + 
assert_instr: [orn] + compose: + - LLVMLink: { name: "orn.z.nxv16i1" } + + - name: svlsl[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Logical shift left + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i8, u8], u8] + - [[i16, u16], u16] + - [[i32, u32], u32] + - [[i64, u64], u64] + assert_instr: [lsl] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "lsl.{sve_type[0]}" } + + - name: svlsl_wide[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Logical shift left + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i8, i16, i32, u8, u16, u32], u64] + assert_instr: [lsl] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "lsl.wide.{sve_type[0]}" } + + - name: svasr[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Arithmetic shift right + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + assert_instr: [asr] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "asr.{sve_type[0]}" } + + - name: svasr_wide[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Arithmetic shift right + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i8, i16, i32], u64] + assert_instr: [asr] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "asr.wide.{sve_type[0]}" } + + - name: svasrd[_n_{type}]{_mxz} + attr: [*sve-unstable] + doc: Arithmetic shift right for divide by immediate + arguments: ["pg: {predicate}", "op1: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size}"] }] + types: [i8, i16, i32, i64] + 
assert_instr: [[asrd, "IMM2 = 1"]] + zeroing_method: { select: op1 } + compose: + - LLVMLink: + name: "asrd.{sve_type}" + arguments: ["pg: {predicate}", "op1: {sve_type}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$pg, $op1, $IMM2]] + + - name: svlsr[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Logical shift right + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u16, u32, u64] + assert_instr: [lsr] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "lsr.{sve_type}" } + + - name: svlsr_wide[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Logical shift right + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[u8, u16, u32], u64] + assert_instr: [lsr] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "lsr.wide.{sve_type[0]}" } + + - name: svadda[_{type}] + attr: [*sve-unstable] + doc: Add reduction (strictly-ordered) + arguments: ["pg: {predicate}", "initial: {type}", "op: {sve_type}"] + return_type: "{type}" + assert_instr: [fadda] + types: [f32, f64] + compose: + - LLVMLink: { name: "fadda.{sve_type}" } + + - name: svaddv[_{type}] + attr: [*sve-unstable] + doc: Add reduction + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i64, u64] + assert_instr: [{ float: faddv, default: uaddv }] + compose: + - LLVMLink: { name: "{type_kind.fsu}addv.{sve_type}" } + + - name: svaddv[_{type[0]}] + attr: [*sve-unstable] + doc: Add reduction + arguments: ["pg: {predicate[0]}", "op: {sve_type[0]}"] + return_type: "{type[1]}" + types: + - [[i8, i16, i32], i64] + - [[u8, u16, u32], u64] + assert_instr: ["{type_kind[0].su}addv"] + compose: + - LLVMLink: { name: "{type_kind[0].su}addv.{sve_type[0]}" } + + - name: svmaxv[_{type}] + attr: [*sve-unstable] + doc: Maximum reduction to scalar + arguments: ["pg: {predicate}", "op: 
{sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.fsu}maxv"] + compose: + - LLVMLink: { name: "{type_kind.fsu}maxv.{sve_type}" } + + - name: svmaxnmv[_{type}] + attr: [*sve-unstable] + doc: Maximum number reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64] + assert_instr: [fmaxnmv] + compose: + - LLVMLink: { name: "fmaxnmv.{sve_type}" } + + - name: svminv[_{type}] + attr: [*sve-unstable] + doc: Minimum reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.fsu}minv"] + compose: + - LLVMLink: { name: "{type_kind.fsu}minv.{sve_type}" } + + - name: svminnmv[_{type}] + attr: [*sve-unstable] + doc: Minimum number reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64] + assert_instr: [fminnmv] + compose: + - LLVMLink: { name: "fminnmv.{sve_type}" } + + - name: svmul[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: ["{type_kind.f}mul"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}mul.{sve_type}" } + + - name: svmulh[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply, returning high-half + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: ["{type_kind.su}mulh"] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}mulh.{sve_type}" } + + - name: svmulx[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply extended (∞×0=2) + arguments: ["pg: 
{predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: ["fmulx"] + types: [f32, f64] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "fmulx.{sve_type}" } + + - name: svrecpe[_{type}] + attr: [*sve-unstable] + doc: Reciprocal estimate + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frecpe] + compose: + - LLVMLink: { name: "frecpe.x.{sve_type}" } + + - name: svrecps[_{type}] + attr: [*sve-unstable] + doc: Reciprocal step + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frecps] + compose: + - LLVMLink: { name: "frecps.x.{sve_type}" } + + - name: svrecpx[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reciprocal exponent + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frecpx] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frecpx.x.{sve_type}" } + + - name: svrsqrte[_{type}] + attr: [*sve-unstable] + doc: Reciprocal square root estimate + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frsqrte] + compose: + - LLVMLink: { name: "frsqrte.x.{sve_type}" } + + - name: svrsqrts[_{type}] + attr: [*sve-unstable] + doc: Reciprocal square root step + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frsqrts] + compose: + - LLVMLink: { name: "frsqrts.x.{sve_type}" } + + - name: svmad[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply-add, multiplicand first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: ["{type_kind.f}mad"] + compose: + - LLVMLink: { 
name: "{type_kind.f}mad.{sve_type}" } + + - name: svmla[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply-add, addend first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: ["{type_kind.f}mla"] + compose: + - LLVMLink: { name: "{type_kind.f}mla.{sve_type}" } + + - name: svmla_lane[_{type}] + attr: [*sve-unstable] + doc: Multiply-add, addend first + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [f32, f64] + assert_instr: [[fmla, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "fmla.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmls[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply-subtract, minuend first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: ["{type_kind.f}mls"] + compose: + - LLVMLink: { name: "{type_kind.f}mls.{sve_type}" } + + - name: svmls_lane[_{type}] + attr: [*sve-unstable] + doc: Multiply-subtract, minuend first + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [f32, f64] + assert_instr: [[fmls, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "fmls.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: 
{sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmsb[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply-subtract, multiplicand first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: ["{type_kind.f}msb"] + compose: + - LLVMLink: { name: "{type_kind.f}msb.{sve_type}" } + + - name: svnmad[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Negated multiply-add, multiplicand first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: [fnmad] + compose: + - LLVMLink: { name: "fnmad.{sve_type}" } + + - name: svnmla[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Negated multiply-add, addend first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: [fnmla] + compose: + - LLVMLink: { name: "fnmla.{sve_type}" } + + - name: svnmls[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Negated multiply-subtract, minuend first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: [fnmls] + compose: + - LLVMLink: { name: "fnmls.{sve_type}" } + + - name: svnmsb[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Negated multiply-subtract, multiplicand first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { 
select: op1 } + n_variant_op: op3 + assert_instr: [fnmsb] + compose: + - LLVMLink: { name: "fnmsb.{sve_type}" } + + - name: svneg[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Negate + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64] + assert_instr: ["{type_kind.f}neg"] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "{type_kind.f}neg.{sve_type}" } + + - name: svqadd[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating add + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.su}qadd"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}qadd.x.{sve_type}" } + + - name: svadr{size_literal[2]}[_{type[0]}base]_[{type[1]}]{index_or_offset} + attr: [*sve-unstable] + substitutions: + index_or_offset: { match_size: "{type[2]}", default: index, byte: offset } + indices_or_offsets: + { match_size: "{type[2]}", default: indices, byte: offsets } + doc: Compute vector addresses for {size[2]}-bit data + arguments: ["bases: {sve_type[0]}", "{indices_or_offsets}: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [u32, [i32, u32], [i8, i16, i32, i64]] + - [u64, [i64, u64], [i8, i16, i32, i64]] + assert_instr: [adr] + compose: + - LLVMLink: { name: "adr{size_literal[2]}.{sve_type[0]}" } + + - name: svdot[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Dot product + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i32, i8] + - [i64, i16] + - [u32, u8] + - [u64, u16] + assert_instr: ["{type_kind[0].su}dot"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}dot.{sve_type[0]}" } + + - name: svdot_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Dot product + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: 
"{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[0]}" }] + types: + - [i32, i8] + - [i64, i16] + - [u32, u8] + - [u64, u16] + assert_instr: [["{type_kind[0].su}dot", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}dot.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "imm_index: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svusdot[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Dot product (unsigned × signed) + target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"] + return_type: "{sve_type[0]}" + types: [[i32, u8, i8]] + assert_instr: [usdot] + n_variant_op: op3 + compose: + - LLVMLink: { name: "usdot.{sve_type[0]}" } + + - name: svusdot_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Dot product (unsigned × signed) + target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[0]}" }] + types: [[i32, u8, i8]] + assert_instr: [[usdot, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "usdot.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[2]}" + - "imm_index: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svsudot[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Dot product (signed × unsigned) + target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"] + return_type: "{sve_type[0]}" + types: [[i32, i8, u8]] + assert_instr: [usdot] + n_variant_op: op3 + compose: + - FnCall: ["svusdot_{type[0]}", [$op1, $op3, $op2]] + + - name: svsudot_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Dot product (signed × unsigned) + 
target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[0]}" }] + types: [[i32, i8, u8]] + assert_instr: [[sudot, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sudot.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[2]}" + - "imm_index: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svdiv[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Divide + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i32, i64, u32, u64] + assert_instr: ["{type_kind.fsu}div"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.fsu}div.{sve_type}" } + + - name: svdivr[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Divide reversed + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i32, i64, u32, u64] + assert_instr: ["{type_kind.fsu}divr"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.fsu}divr.{sve_type}" } + + - name: svexpa[_{type[0]}] + attr: [*sve-unstable] + doc: Floating-point exponential accelerator + arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, u32], [f64, u64]] + assert_instr: [fexpa] + compose: + - LLVMLink: { name: "fexpa.x.{sve_type[0]}" } + + - name: svscale[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Adjust exponent + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, i32], [f64, i64]] + assert_instr: [fscale] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "fscale.{sve_type[0]}" } + + - name: svmmla[_{type}] + 
attr: [*sve-unstable] + doc: Matrix multiply-accumulate + target_features: [f32mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [f32] + assert_instr: [fmmla] + compose: + - LLVMLink: { name: "fmmla.{sve_type}" } + + - name: svmmla[_{type}] + attr: [*sve-unstable] + doc: Matrix multiply-accumulate + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [f64] + assert_instr: [fmmla] + compose: + - LLVMLink: { name: "fmmla.{sve_type}" } + + - name: svmmla[_{type[0]}] + attr: [*sve-unstable] + doc: Matrix multiply-accumulate + target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i32, i8], [u32, u8]] + assert_instr: ["{type_kind[0].su}mmla"] + compose: + - LLVMLink: { name: "{type_kind[0].su}mmla.{sve_type[0]}" } + + - name: svusmmla[_{type[0]}] + attr: [*sve-unstable] + doc: Matrix multiply-accumulate (unsigned × signed) + target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"] + return_type: "{sve_type[0]}" + types: [[i32, u8, i8]] + assert_instr: [usmmla] + compose: + - LLVMLink: { name: "usmmla.{sve_type[0]}" } + + - name: svmin[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Minimum + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind.fsu}min"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.fsu}min.{sve_type}" } + + - name: svminnm[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Minimum number + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { select: op1 } + assert_instr: [fminnm] + n_variant_op: op2 + compose: 
+ - LLVMLink: { name: "fminnm.{sve_type}" } diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml new file mode 100644 index 0000000000000..6365bea21b511 --- /dev/null +++ b/library/stdarch/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml @@ -0,0 +1,3196 @@ +arch_cfgs: + - arch_name: aarch64 + target_feature: [sve, sve2] + llvm_prefix: llvm.aarch64.sve + +auto_llvm_sign_conversion: true +generate_load_store_tests: true + +# `#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]` +sve-unstable: &sve-unstable + FnCall: [unstable, ['feature = "stdarch_aarch64_sve"', 'issue= "145052"']] + +intrinsics: + - name: svbext[{_n}_{type}] + attr: [*sve-unstable] + target_features: [sve2-bitperm] + doc: Gather lower bits from positions selected by bitmask + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u16, u32, u64] + assert_instr: [bext] + n_variant_op: op2 + compose: + - LLVMLink: { name: "bext.x.{sve_type}" } + + - name: svbgrp[{_n}_{type}] + attr: [*sve-unstable] + target_features: [sve2-bitperm] + doc: Group bits to right or left as selected by bitmask + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u16, u32, u64] + assert_instr: [bgrp] + n_variant_op: op2 + compose: + - LLVMLink: { name: "bgrp.x.{sve_type}" } + + - name: svbdep[{_n}_{type}] + attr: [*sve-unstable] + target_features: [sve2-bitperm] + doc: Scatter lower bits into positions selected by bitmask + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u16, u32, u64] + assert_instr: [bdep] + n_variant_op: op2 + compose: + - LLVMLink: { name: "bdep.x.{sve_type}" } + + - name: svhistcnt[_{type[0]}]_z + attr: [*sve-unstable] + doc: Count matching elements + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: [[i32, 
u32], [i64, u64], [u32, u32], [u64, u64]] + assert_instr: [histcnt] + compose: + - LLVMLink: { name: "histcnt.{sve_type[0]}" } + + - name: svhistseg[_{type[0]}] + attr: [*sve-unstable] + doc: Count matching elements in 128-bit segments + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: [[i8, u8], [u8, u8]] + assert_instr: [histseg] + compose: + - LLVMLink: { name: "histseg.{sve_type[0]}" } + + - name: svmatch[_{type}] + attr: [*sve-unstable] + doc: Detect any matching elements + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [i8, i16, u8, u16] + assert_instr: [match] + compose: + - LLVMLink: { name: "match.{sve_type}" } + + - name: svnmatch[_{type}] + attr: [*sve-unstable] + doc: Detect no matching elements + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [i8, i16, u8, u16] + assert_instr: [nmatch] + compose: + - LLVMLink: { name: "nmatch.{sve_type}" } + + - name: svhadd[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Halving add + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind.su}hadd"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}hadd.{sve_type}" } + + - name: svrhadd[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Rounding halving add + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind.su}rhadd"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}rhadd.{sve_type}" } + + - name: svaddhnb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add narrow high part (bottom) + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] 
+ return_type: "{sve_type[1]}" + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"] + assert_instr: [addhnb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "addhnb.{sve_type[0]}" } + + - name: svaddhnt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add narrow high part (top) + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + return_type: "{sve_type[1]}" + arguments: + ["even: {sve_type[1]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"] + assert_instr: [addhnt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "addhnt.{sve_type[0]}" } + + - name: svraddhnb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Rounding add narrow high part (bottom) + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + return_type: "{sve_type[1]}" + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"] + assert_instr: [raddhnb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "raddhnb.{sve_type[0]}" } + + - name: svraddhnt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Rounding add narrow high part (top) + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + return_type: "{sve_type[1]}" + arguments: + ["even: {sve_type[1]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"] + assert_instr: [raddhnt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "raddhnt.{sve_type[0]}" } + + - name: svcadd[_{type}] + attr: [*sve-unstable] + doc: Complex add with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [90, 270] }] + assert_instr: [[cadd, "IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: cadd.x.{sve_type} + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_rotation: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM_ROTATION]] + + - name: svcdot[_{type[0]}] + attr: [*sve-unstable] + doc: Complex dot 
product + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i32, i8], [i64, i16]] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }] + assert_instr: [[cdot, "IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: cdot.{sve_type[0]} + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_ROTATION]] + + - name: svcdot_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Complex dot product + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i32, i8], [i64, i16]] + static_defs: ["const IMM_INDEX: i32", "const IMM_ROTATION: i32"] + constraints: + - { variable: IMM_INDEX, vec_max_elems_type: "{type[0]}" } + - { variable: IMM_ROTATION, any_values: [0, 90, 180, 270] } + assert_instr: [[cdot, "IMM_INDEX = 0, IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: cdot.lane.{sve_type[0]} + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "imm_index: i32" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX, $IMM_ROTATION]] + + - name: svcmla[_{type}] + attr: [*sve-unstable] + doc: Complex multiply-add with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }] + assert_instr: [[cmla, "IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: cmla.x.{sve_type} + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_ROTATION]] + + - name: svcmla_lane[_{type}] + attr: [*sve-unstable] + doc: 
Complex multiply-add with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i16, i32, u16, u32] + static_defs: ["const IMM_INDEX: i32", "const IMM_ROTATION: i32"] + constraints: + - variable: IMM_INDEX + range: { match_size: "{type}", default: [0, 1], halfword: [0, 3] } + - { variable: IMM_ROTATION, any_values: [0, 90, 180, 270] } + assert_instr: [[cmla, "IMM_INDEX = 0, IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: cmla.lane.x.{sve_type} + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_index: i32" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX, $IMM_ROTATION]] + + - name: svqrdcmlah[_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling complex multiply-add high with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }] + assert_instr: [[sqrdcmlah, "IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: sqrdcmlah.x.{sve_type} + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_ROTATION]] + + - name: svqrdcmlah_lane[_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling complex multiply-add high with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i16, i32] + static_defs: ["const IMM_INDEX: i32", "const IMM_ROTATION: i32"] + constraints: + - variable: IMM_INDEX + range: { match_size: "{type}", default: [0, 1], halfword: [0, 3] } + - { variable: IMM_ROTATION, any_values: [0, 90, 180, 270] } + assert_instr: [[sqrdcmlah, "IMM_INDEX = 0, IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: sqrdcmlah.lane.x.{sve_type} + arguments: + - 
"op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_index: i32" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX, $IMM_ROTATION]] + + - name: svqcadd[_{type}] + attr: [*sve-unstable] + doc: Saturating complex add with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: "IMM_ROTATION", any_values: [90, 270] }] + assert_instr: [[sqcadd, "IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: "sqcadd.x.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_rotation: i32"] + - FnCall: ["{llvm_link}", ["$op1", "$op2", "$IMM_ROTATION"]] + + - name: svsublb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}sublb"] + n_variant_op: op2 + compose: + - LLVMLink: + name: "{type_kind[0].su}sublb.{sve_type[0]}" + + - name: svsublbt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract long (bottom - top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + assert_instr: [ssublbt] + n_variant_op: op2 + compose: + - LLVMLink: + name: "ssublbt.{sve_type[0]}" + + - name: svsublt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}sublt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}sublt.{sve_type[0]}" } + + - name: svsubltb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract long (top - bottom) + 
arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + assert_instr: [ssubltb] + n_variant_op: op2 + compose: + - LLVMLink: + name: "ssubltb.{sve_type[0]}" + + - name: svsubwb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract wide (bottom) + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}subwb"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}subwb.{sve_type[0]}" } + + - name: svsubwt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract wide (top) + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}subwt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}subwt.{sve_type[0]}" } + + - name: svrsubhnb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Rounding subtract narrow high part (bottom) + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [rsubhnb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "rsubhnb.{sve_type[0]}" } + + - name: svrsubhnt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Rounding subtract narrow high part (top) + arguments: + ["even: {sve_type[1]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [rsubhnt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "rsubhnt.{sve_type[0]}" } + + - name: svsubhnb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract narrow high part (bottom) 
+ arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [subhnb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "subhnb.{sve_type[0]}" } + + - name: svsubhnt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract narrow high part (top) + arguments: + ["even: {sve_type[1]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [subhnt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "subhnt.{sve_type[0]}" } + + - name: svsbclb[{_n}_{type}] + attr: [*sve-unstable] + doc: Subtract with borrow long (bottom) + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [u32, u64] + assert_instr: [sbclb] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sbclb.{sve_type}" } + + - name: svsbclt[{_n}_{type}] + attr: [*sve-unstable] + doc: Subtract with borrow long (top) + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [u32, u64] + assert_instr: [sbclt] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sbclt.{sve_type}" } + + - name: svqsub[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Saturating subtract + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind.su}qsub"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}qsub.{sve_type}" } + + - name: svqsubr[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Saturating subtract reversed + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { 
select: op1 } + assert_instr: ["{type_kind.su}qsubr"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}qsubr.{sve_type}" } + + - name: svhsub[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Halving subtract + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.su}hsub"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}hsub.{sve_type}" } + + - name: svhsubr[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Halving subtract reversed + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.su}hsub"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}hsubr.{sve_type}" } + + - name: svwhilege_{sve_type[1]}[_{type[0]}] + attr: [*sve-unstable] + doc: While decrementing scalar is greater than or equal to + arguments: ["op1: {type[0]}", "op2: {type[0]}"] + return_type: "{sve_type[1]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + assert_instr: [{ default: whilege, unsigned: whilehs }] + compose: + - MatchKind: + - "{type[0]}" + - default: { LLVMLink: { name: "whilege.{sve_type[1]}.{type[0]}" } } + unsigned: { LLVMLink: { name: "whilehs.{sve_type[1]}.{type[0]}" } } + + - name: svwhilegt_{sve_type[1]}[_{type[0]}] + attr: [*sve-unstable] + doc: While decrementing scalar is greater than + arguments: ["op1: {type[0]}", "op2: {type[0]}"] + return_type: "{sve_type[1]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + assert_instr: [{ default: whilegt, unsigned: whilehi }] + compose: + - MatchKind: + - "{type[0]}" + - default: { LLVMLink: { name: "whilegt.{sve_type[1]}.{type[0]}" } } + unsigned: { LLVMLink: { name: "whilehi.{sve_type[1]}.{type[0]}" } } + + - name: svwhilerw_{size}ptr + attr: 
[*sve-unstable] + safety: + unsafe: [] + visibility: private + static_defs: [T] + substitutions: + size_alt: + match_size: "{type}" + byte: b + halfword: h + default: s + doubleword: d + arguments: ["op1: *T", "op2: *T"] + return_type: "{predicate}" + types: [i8, i16, i32, i64] + assert_instr: [] + compose: + - Let: [op1, CastAs: [$op1, "*const crate::ffi::c_void"]] + - Let: [op2, CastAs: [$op2, "*const crate::ffi::c_void"]] + - LLVMLink: + name: "whilerw.{size_alt}.{predicate}.p0" + arguments: ["op1: *crate::ffi::c_void", "op2: *crate::ffi::c_void"] + + - name: svwhilerw[_{type}] + attr: [*sve-unstable] + doc: While free of read-after-write conflicts + # TODO: This might be safe even with unrelated pointers, but the LLVM builtin's guarantees don't + # seem to be documented, so we conservatively keep this unsafe for now. + safety: + unsafe: + - custom: "[`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints + must be met for at least the base pointers, `op1` and `op2`." 
+ arguments: ["op1: *{type}", "op2: *{type}"] + return_type: "svbool_t" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [whilerw] + compose: + - FnCall: + - "svwhilerw_{size}ptr" + - - $op1 + - $op2 + - - Type: "{type}" + + - name: svwhilewr_{size}ptr + attr: [*sve-unstable] + safety: + unsafe: [] + visibility: private + static_defs: [T] + substitutions: + size_alt: + match_size: "{type}" + byte: b + halfword: h + default: s + doubleword: d + arguments: ["op1: *T", "op2: *T"] + return_type: "{predicate}" + types: [i8, i16, i32, i64] + assert_instr: [] + compose: + - Let: [op1, CastAs: [$op1, "*const crate::ffi::c_void"]] + - Let: [op2, CastAs: [$op2, "*const crate::ffi::c_void"]] + - LLVMLink: + name: "whilewr.{size_alt}.{predicate}.p0" + arguments: ["op1: *crate::ffi::c_void", "op2: *crate::ffi::c_void"] + + - name: svwhilewr[_{type}] + attr: [*sve-unstable] + doc: While free of write-after-read conflicts + # TODO: This might be safe even with unrelated pointers, but the LLVM builtin's guarantees don't + # seem to be documented, so we conservatively keep this unsafe for now. + safety: + unsafe: + - custom: "[`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints + must be met for at least the base pointers, `op1` and `op2`." 
+ arguments: ["op1: *{type}", "op2: *{type}"] + return_type: "svbool_t" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [whilewr] + compose: + - FnCall: + - "svwhilewr_{size}ptr" + - - $op1 + - $op2 + - - Type: "{type}" + + - name: svtbl2[_{type[0]}] + attr: [*sve-unstable] + doc: Table lookup in two-vector table + arguments: ["data: {sve_type_x2[0]}", "indices: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [f32, u32] + - [f64, u64] + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + assert_instr: [tbl] + compose: + - LLVMLink: + name: "tbl2.{sve_type[0]}" + arguments: + - "data0: {sve_type[0]}" + - "data1: {sve_type[0]}" + - "indices: {sve_type[1]}" + - FnCall: + - "{llvm_link}" + - - FnCall: ["svget2_{type[0]}", ["$data"], [0]] + - FnCall: ["svget2_{type[0]}", ["$data"], [1]] + - $indices + + - name: svtbx[_{type[0]}] + attr: [*sve-unstable] + doc: Table lookup in single-vector table (merging) + arguments: + - "fallback: {sve_type[0]}" + - "data: {sve_type[0]}" + - "indices: {sve_type[1]}" + return_type: "{sve_type[0]}" + types: + - [f32, u32] + - [f64, u64] + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + assert_instr: [tbx] + compose: + - LLVMLink: { name: "tbx.{sve_type[0]}" } + + - name: svcvtlt_{type[0]}[_{type[1]}]_m + attr: [*sve-unstable] + doc: Up convert long (top) + arguments: + ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f64, f32]] + assert_instr: [fcvtlt] + compose: + - LLVMLink: { name: "fcvtlt.{type[0]}{type[1]}" } + + - name: svcvtlt_{type[0]}[_{type[1]}]_x + attr: [*sve-unstable] + doc: Up convert long (top) + arguments: ["pg: svbool_t", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f64, f32]] + assert_instr: [fcvtlt] + compose: + - FnCall: + - "svcvtlt_{type[0]}_{type[1]}_m" + - - FnCall: 
["crate::intrinsics::transmute_unchecked", [$op], [], true] + - $pg + - $op + + - name: svcvtnt_{type[0]}[_{type[1]}]{_mx} + attr: [*sve-unstable] + doc: Down convert and narrow (top) + arguments: + ["even: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, f64]] + assert_instr: [fcvtnt] + compose: + - LLVMLink: { name: "fcvtnt.{type[0]}{type[1]}" } + + - name: svcvtx_{type[0]}[_{type[1]}]{_mxz} + attr: [*sve-unstable] + doc: Down convert, rounding to odd + arguments: + ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, f64]] + zeroing_method: { drop: inactive } + assert_instr: [fcvtx] + compose: + - LLVMLink: { name: "fcvtx.{type[0]}{type[1]}" } + + - name: svcvtxnt_{type[0]}[_{type[1]}]{_mx} + attr: [*sve-unstable] + doc: Down convert, rounding to odd (top) + arguments: + ["even: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, f64]] + assert_instr: [fcvtxnt] + compose: + - LLVMLink: { name: "fcvtxnt.{type[0]}{type[1]}" } + + - name: svldnt1_gather_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[1]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["ldnt1{size_literal[0]}"] + test: { load: 1 } + compose: + - LLVMLink: { name: "ldnt1.gather.index.{sve_type[1]}" } + + - name: svldnt1_gather_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[1]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [[i64, 
u64], [f64, i64, u64]] + assert_instr: ["ldnt1{size_literal[0]}"] + test: { load: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: { name: "ldnt1.gather.uxtw.{sve_type[1]}" } + doubleword: + LLVMLink: { name: "ldnt1.gather.{sve_type[1]}" } + + - name: svldnt1_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldnt1{size_literal[0]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldnt1.gather.scalar.offset.{sve_type[1]}.{sve_type[0]}" + + - name: svldnt1_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldnt1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldnt1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldnt1_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldnt1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldnt1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, 
["{size_in_bytes_log2[0]}"]] + + - name: svldnt1s{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i64, u64], [i64, u64], [i16, i32]] + assert_instr: ["ldnt1s{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ldnt1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + + - name: svldnt1u{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i64, u64], [u64, i64], [u16, u32]] + assert_instr: ["ldnt1{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ldnt1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldnt1s{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [[i64, u64], [i64, u64], [i8, i16, i32]] + assert_instr: 
["ldnt1s{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldnt1.gather.uxtw.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldnt1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + + - name: svldnt1u{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [u32, i32], [u8, u16]] + - [[i64, u64], [u64, i64], [u8, u16, u32]] + assert_instr: ["ldnt1{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldnt1.gather.uxtw.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldnt1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldnt1s{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["ldnt1s{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: 
"ldnt1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + + - name: svldnt1u{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [u32, i32], [u8, u16]] + - [u64, [u64, i64], [u8, u16, u32]] + assert_instr: ["ldnt1{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ldnt1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldnt1s{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["ldnt1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldnt1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldnt1u{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", 
"bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [u8, u16]] + - [u64, [i64, u64], [u8, u16, u32]] + assert_instr: ["ldnt1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldnt1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldnt1s{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], u16] + - [u64, [i64, u64], [u16, u32]] + assert_instr: ["ldnt1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldnt1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svldnt1u{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], u16] + - [u64, [i64, u64], [u16, u32]] + assert_instr: ["ldnt1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldnt1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svstnt1_scatter_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "base: *mut 
{type[1]}" + - "indices: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["stnt1{size_literal[0]}"] + test: { store: 1 } + compose: + - LLVMLink: + name: "stnt1.scatter.index.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "indices: {sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $base, $indices]] + + - name: svstnt1_scatter_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "offsets: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["stnt1{size_literal[0]}"] + test: { store: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "stnt1.scatter.uxtw.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "offsets: {sve_type[0]}" + doubleword: + LLVMLink: + name: "stnt1.scatter.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "offsets: {sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $base, $offsets]] + + - name: svstnt1_scatter[_{type[0]}base]_offset[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - "data: {sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["stnt1{size_literal[0]}"] + test: { store: 1 } + compose: + - LLVMLink: + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + name: 
"stnt1.scatter.scalar.offset.{sve_type[1]}.{sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $bases, $offset]] + + - name: svstnt1_scatter[_{type[0]}base_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + ["pg: {predicate[0]}", "bases: {sve_type[0]}", "data: {sve_type[1]}"] + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["stnt1{size_literal[0]}"] + test: { store: 1 } + compose: + - FnCall: + - "svstnt1_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + - $data + + - name: svstnt1_scatter[_{type[0]}base]_index[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "index: i64" + - "data: {sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["stnt1{size_literal[0]}"] + test: { store: 1 } + compose: + - FnCall: + - "svstnt1_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]] + - $data + + - name: svstnt1{size_literal[2]}_scatter_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "indices: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [[i64, u64], i64, [i16, i32]] + - [[i64, u64], u64, [u16, u32]] + assert_instr: ["stnt1{size_literal[2]}"] + test: { store: 2 } + compose: + - LLVMLink: + name: "stnt1.scatter.index.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: 
{predicate[0]}" + - "base: *mut {type[2]}" + - "indices: {sve_type[0]}" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::simd_cast", [$data]], $pg, $base, $indices] + + - name: svstnt1{size_literal[2]}_scatter_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [u32, i32, [i8, i16]] + - [u32, u32, [u8, u16]] + - [[i64, u64], i64, [i8, i16, i32]] + - [[i64, u64], u64, [u8, u16, u32]] + assert_instr: ["stnt1{size_literal[2]}"] + test: { store: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "stnt1.scatter.uxtw.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + doubleword: + LLVMLink: + name: "stnt1.scatter.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::simd_cast", [$data]], $pg, $base, $offsets] + + - name: svstnt1{size_literal[2]}_scatter[_{type[0]}base]_offset[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - "data: {sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["stnt1{size_literal[2]}"] + test: { store: 2 } + compose: + - LLVMLink: + name: "stnt1.scatter.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + arguments: + - 
"data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::simd_cast", [$data]], $pg, $bases, $offset] + + - name: svstnt1{size_literal[2]}_scatter[_{type[0]}base_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + ["pg: {predicate[0]}", "bases: {sve_type[0]}", "data: {sve_type[1]}"] + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["stnt1{size_literal[2]}"] + test: { store: 2 } + compose: + - FnCall: + - "svstnt1{size_literal[2]}_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + - $data + + - name: svstnt1{size_literal[2]}_scatter[_{type[0]}base]_index[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "index: i64" + - "data: {sve_type[1]}" + types: + - [u32, [i32, u32], i16] + - [u64, [i64, u64], [i16, i32]] + assert_instr: ["stnt1{size_literal[2]}"] + test: { store: 2 } + compose: + - FnCall: + - "svstnt1{size_literal[2]}_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + - $data + + - name: svaba[{_n}_{type}] + attr: [*sve-unstable] + doc: Absolute difference and accumulate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind}aba"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind}aba.{sve_type}" } + + - name: svqabs[_{type}]{_mxz} + attr: [*sve-unstable] + doc: 
Saturating absolute value + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqabs] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "sqabs.{sve_type}" } + + - name: svabdlb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Absolute difference long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}abdlb"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}abdlb.{sve_type[0]}" } + + - name: svabdlt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Absolute difference long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}abdlt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}abdlt.{sve_type[0]}" } + + - name: svabalb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Absolute difference long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}abalb"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}abalb.{sve_type[0]}" } + + - name: svabalt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Absolute difference long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}abalt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: 
"{type_kind[0].su}abalt.{sve_type[0]}" } + + - name: svbcax[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise clear and exclusive OR + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [bcax] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "bcax.{sve_type}" } + + - name: sveorbt[{_n}_{type}] + attr: [*sve-unstable] + doc: Interleaving exclusive OR (bottom, top) + arguments: ["odd: {sve_type}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [eorbt] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + compose: + - LLVMLink: { name: "eorbt.{sve_type}" } + + - name: sveortb[{_n}_{type}] + attr: [*sve-unstable] + doc: Interleaving exclusive OR (top, bottom) + arguments: ["even: {sve_type}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [eortb] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + compose: + - LLVMLink: { name: "eortb.{sve_type}" } + + - name: sveor3[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise exclusive OR of three vectors + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [eor3] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "eor3.{sve_type}" } + + - name: svbsl[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise select + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [bsl] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "bsl.{sve_type}" } + + - name: svbsl1n[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise select with first input inverted + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [bsl1n] + types: [i8, i16, i32, i64, u8, 
u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "bsl1n.{sve_type}" } + + - name: svbsl2n[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise select with second input inverted + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [bsl2n] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "bsl2n.{sve_type}" } + + - name: svnbsl[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise select + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [nbsl] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "nbsl.{sve_type}" } + + - name: svxar[_n_{type}] + attr: [*sve-unstable] + doc: Bitwise exclusive OR and rotate right + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: ["1", "{size}"] }] + assert_instr: [[xar, "IMM3 = 1"]] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + compose: + - LLVMLink: + name: "xar.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svrax1[_{type}] + attr: [*sve-unstable] + doc: Bitwise rotate left by 1 and exclusive OR + target_features: [sve2-sha3] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [rax1] + types: [i64, u64] + compose: + - LLVMLink: { name: "rax1" } + + - name: svshllb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Shift left long (bottom) + arguments: ["op1: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["0", "{size_minus_one[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [["{type_kind[0].su}shllb", "IMM2 = 0"]] + 
compose: + - LLVMLink: + name: "{type_kind[0].su}shllb.{sve_type[0]}" + arguments: ["op1: {sve_type[1]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svshllt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Shift left long (top) + arguments: ["op1: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["0", "{size_minus_one[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [["{type_kind[0].su}shllt", "IMM2 = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}shllt.{sve_type[0]}" + arguments: ["op1: {sve_type[1]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svrshl[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Rounding shift left + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i8, u8], i8] + - [[i16, u16], i16] + - [[i32, u32], i32] + - [[i64, u64], i64] + assert_instr: ["{type_kind[0].su}rshl"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}rshl.{sve_type[0]}" } + + - name: svqrshl[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Saturating rounding shift left + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i8, u8], i8] + - [[i16, u16], i16] + - [[i32, u32], i32] + - [[i64, u64], i64] + assert_instr: ["{type_kind[0].su}qrshl"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}qrshl.{sve_type[0]}" } + + - name: svqshl[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Saturating shift left + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i8, u8], i8] + - [[i16, u16], i16] + - [[i32, u32], i32] + - [[i64, u64], i64] + 
assert_instr: ["{type_kind[0].su}qshl"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}qshl.{sve_type[0]}" } + + - name: svqshlu[_n_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Saturating shift left unsigned + arguments: ["pg: {predicate[0]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["0", "{size_minus_one[1]}"] }] + types: + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + assert_instr: [[sqshlu, "IMM2 = 0"]] + zeroing_method: { select: op1 } + compose: + - LLVMLink: + name: "sqshlu.{sve_type[0]}" + arguments: ["pg: {predicate[0]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$pg, $op1, $IMM2]] + + - name: svsli[_n_{type}] + attr: [*sve-unstable] + doc: Shift left and insert + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: ["0", "{size_minus_one}"] }] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [[sli, "IMM3 = 0"]] + compose: + - LLVMLink: + name: "sli.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svrshr[_n_{type}]{_mxz} + attr: [*sve-unstable] + doc: Rounding shift right + arguments: ["pg: {predicate}", "op1: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size}"] }] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [["{type_kind.su}rshr", "IMM2 = 1"]] + zeroing_method: { select: op1 } + compose: + - LLVMLink: + name: "{type_kind.su}rshr.{sve_type}" + arguments: ["pg: {predicate}", "op1: {sve_type}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$pg, $op1, $IMM2]] + + - name: svrsra[_n_{type}] + attr: [*sve-unstable] + doc: Rounding shift right and accumulate + arguments: ["op1: 
{sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: ["1", "{size}"] }] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [["{type_kind.su}rsra", "IMM3 = 1"]] + compose: + - LLVMLink: + name: "{type_kind.su}rsra.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svrshrnb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Rounding shift right narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [[rshrnb, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "rshrnb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svrshrnt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Rounding shift right narrow (top) + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [[rshrnt, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "rshrnt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: svqrshrnb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating rounding shift right narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: 
[["{type_kind[0].su}qrshrnb", "IMM2 = 1"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}qrshrnb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svqrshrnt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating rounding shift right narrow (top) + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [["{type_kind[0].su}qrshrnt", "IMM2 = 1"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}qrshrnt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: svqrshrunb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating rounding shift right unsigned narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [[sqrshrunb, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "sqrshrunb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svqrshrunt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating rounding shift right unsigned narrow (top) + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [[sqrshrunt, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "sqrshrunt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: 
svqshrnb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating shift right narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [["{type_kind[0].su}qshrnb", "IMM2 = 1"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}qshrnb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svqshrnt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating shift right narrow (top) + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [["{type_kind[0].su}qshrnt", "IMM2 = 1"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}qshrnt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: svqshrunb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating shift right unsigned narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [[sqshrunb, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "sqshrunb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svqshrunt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating shift right unsigned narrow (top) + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ 
variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [[sqshrunt, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "sqshrunt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: svsra[_n_{type}] + attr: [*sve-unstable] + doc: Shift right and accumulate + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: ["1", "{size}"] }] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [["{type_kind.su}sra", "IMM3 = 1"]] + compose: + - LLVMLink: + name: "{type_kind.su}sra.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svsri[_n_{type}] + attr: [*sve-unstable] + doc: Shift right and insert + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: ["1", "{size}"] }] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [[sri, "IMM3 = 1"]] + compose: + - LLVMLink: + name: "sri.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svshrnb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Shift right narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [[shrnb, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "shrnb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svshrnt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Shift right narrow (top) + 
arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [[shrnt, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "shrnt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: svqxtnb[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating extract narrow (bottom) + arguments: ["op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}qxtnb"] + compose: + - LLVMLink: { name: "{type_kind[0].su}qxtnb.{sve_type[0]}" } + + - name: svqxtnt[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating extract narrow (top) + arguments: ["even: {sve_type[1]}", "op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}qxtnt"] + compose: + - LLVMLink: { name: "{type_kind[0].su}qxtnt.{sve_type[0]}" } + + - name: svqxtunb[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating extract unsigned narrow (bottom) + arguments: ["op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [sqxtunb] + compose: + - LLVMLink: { name: "sqxtunb.{sve_type[0]}" } + + - name: svqxtunt[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating extract unsigned narrow (top) + arguments: ["even: {sve_type[1]}", "op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [sqxtunt] + compose: + - LLVMLink: { name: "sqxtunt.{sve_type[0]}" } + + - name: svmovlb[_{type[0]}] + attr: [*sve-unstable] + doc: Move long (bottom) 
+ arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}shllb"] + compose: + - FnCall: ["svshllb_n_{type[0]}", [$op], [0]] + + - name: svmovlt[_{type[0]}] + attr: [*sve-unstable] + doc: Move long (top) + arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}shllt"] + compose: + - FnCall: ["svshllt_n_{type[0]}", [$op], [0]] + + - name: svunpkhi[_{type[0]}] + attr: [*sve-unstable] + doc: Unpack and extend high half + arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}unpkhi"] + compose: + - LLVMLink: { name: "{type_kind[0].su}unpkhi.{sve_type[0]}" } + + - name: svunpkhi[_b] + attr: [*sve-unstable] + doc: Unpack and extend high half + arguments: ["op: svbool_t"] + return_type: "svbool8_t" + assert_instr: [punpkhi] + compose: + - LLVMLink: { name: "punpkhi.nxv16i1" } + + - name: svunpklo[_{type[0]}] + attr: [*sve-unstable] + doc: Unpack and extend low half + arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}unpklo"] + compose: + - LLVMLink: { name: "{type_kind[0].su}unpklo.{sve_type[0]}" } + + - name: svunpklo[_b] + attr: [*sve-unstable] + doc: Unpack and extend low half + arguments: ["op: svbool_t"] + return_type: "svbool8_t" + assert_instr: [punpklo] + compose: + - LLVMLink: { name: "punpklo.nxv16i1" } + + - name: svaddp[_{type}]{_mx} + attr: [*sve-unstable] + doc: Add pairwise + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, 
i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.f}addp"] + compose: + - LLVMLink: { name: "{type_kind.f}addp.{sve_type}" } + + - name: svadalp[_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Add and accumulate long pairwise + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}adalp"] + zeroing_method: { select: op1 } + compose: + - LLVMLink: { name: "{type_kind[0].su}adalp.{sve_type[0]}" } + + - name: svmaxp[_{type}]{_mx} + attr: [*sve-unstable] + doc: Maximum pairwise + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.fsu}maxp"] + compose: + - LLVMLink: { name: "{type_kind.fsu}maxp.{sve_type}" } + + - name: svmaxnmp[_{type}]{_mx} + attr: [*sve-unstable] + doc: Maximum number pairwise + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: ["fmaxnmp"] + compose: + - LLVMLink: { name: "fmaxnmp.{sve_type}" } + + - name: svminp[_{type}]{_mx} + attr: [*sve-unstable] + doc: Minimum pairwise + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.fsu}minp"] + compose: + - LLVMLink: { name: "{type_kind.fsu}minp.{sve_type}" } + + - name: svminnmp[_{type}]{_mx} + attr: [*sve-unstable] + doc: Minimum number pairwise + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: ["fminnmp"] + compose: + - LLVMLink: { name: "fminnmp.{sve_type}" } + + - name: svmul_lane[_{type}] + attr: [*sve-unstable] + doc: Multiply + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + 
return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + assert_instr: [["{type_kind.f}mul", "IMM_INDEX = 0"]] + types: [f32, f64, i16, i32, i64, u16, u32, u64] + compose: + - LLVMLink: + name: "{type_kind.f}mul.lane.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]] + + - name: svqdmulh[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating doubling multiply high + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqdmulh] + n_variant_op: op2 + compose: + - LLVMLink: { name: "sqdmulh.{sve_type}" } + + - name: svqdmulh_lane[_{type}] + attr: [*sve-unstable] + doc: Saturating doubling multiply high + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + assert_instr: [["sqdmulh", "IMM_INDEX = 0"]] + types: [i16, i32, i64] + compose: + - LLVMLink: + name: "sqdmulh.lane.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]] + + - name: svqrdmulh[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling multiply high + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqrdmulh] + n_variant_op: op2 + compose: + - LLVMLink: { name: "sqrdmulh.{sve_type}" } + + - name: svqrdmulh_lane[_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling multiply high + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + assert_instr: [["sqrdmulh", "IMM_INDEX = 0"]] + types: [i16, i32, i64] + compose: + - 
LLVMLink: + name: "sqrdmulh.lane.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]] + + - name: svqdmullb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: [sqdmullb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "sqdmullb.{sve_type[0]}" } + + - name: svqdmullb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + assert_instr: [["sqdmullb", "IMM_INDEX = 0"]] + types: [[i32, i16], [i64, i32]] + compose: + - LLVMLink: + name: "sqdmullb.lane.{sve_type[0]}" + arguments: + ["op1: {sve_type[1]}", "op2: {sve_type[1]}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]] + + - name: svqdmullt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: [sqdmullt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "sqdmullt.{sve_type[0]}" } + + - name: svqdmullt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + assert_instr: [["sqdmullt", "IMM_INDEX = 0"]] + types: [[i32, i16], [i64, i32]] + compose: + - LLVMLink: + name: "sqdmullt.lane.{sve_type[0]}" + arguments: + ["op1: {sve_type[1]}", "op2: {sve_type[1]}", "imm_index: i32"] 
+ - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]] + + - name: svmullb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}mullb"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}mullb.{sve_type[0]}" } + + - name: svmullb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i32, i16] + - [i64, i32] + - [u32, u16] + - [u64, u32] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + assert_instr: [["{type_kind[0].su}mullb", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}mullb.lane.{sve_type[0]}" + arguments: + ["op1: {sve_type[1]}", "op2: {sve_type[1]}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM_INDEX]] + + - name: svmullt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}mullt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}mullt.{sve_type[0]}" } + + - name: svmullt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i32, i16] + - [i64, i32] + - [u32, u16] + - [u64, u32] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + assert_instr: [["{type_kind[0].su}mullt", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: 
"{type_kind[0].su}mullt.lane.{sve_type[0]}" + arguments: + ["op1: {sve_type[1]}", "op2: {sve_type[1]}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM_INDEX]] + + - name: svrecpe[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reciprocal estimate + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [u32] + assert_instr: [urecpe] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "urecpe.{sve_type}" } + + - name: svrsqrte[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reciprocal square root estimate + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [u32] + assert_instr: [ursqrte] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "ursqrte.{sve_type}" } + + - name: svmla_lane[_{type}] + attr: [*sve-unstable] + doc: Multiply-add, addend first + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [i16, i32, i64, u16, u32, u64] + assert_instr: [[mla, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "mla.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmls_lane[_{type}] + attr: [*sve-unstable] + doc: Multiply-subtract, minuend first + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [i16, i32, i64, u16, u32, u64] + assert_instr: [[mls, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "mls.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", 
[$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmlalb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-add long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + assert_instr: ["{type_kind[0].su}mlalb"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}mlalb.{sve_type[0]}" } + + - name: svmlalb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-add long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32], [u32, u16], [u64, u32]] + assert_instr: [["{type_kind[0].su}mlalb", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}mlalb.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmlalt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-add long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + assert_instr: ["{type_kind[0].su}mlalt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}mlalt.{sve_type[0]}" } + + - name: svmlalt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-add long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32], [u32, u16], [u64, u32]] + assert_instr: [["{type_kind[0].su}mlalt", 
"IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}mlalt.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmlslb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-subtract long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + assert_instr: ["{type_kind[0].su}mlslb"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}mlslb.{sve_type[0]}" } + + - name: svmlslb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-subtract long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32], [u32, u16], [u64, u32]] + assert_instr: [["{type_kind[0].su}mlslb", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}mlslb.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmlslt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-subtract long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + assert_instr: ["{type_kind[0].su}mlslt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}mlslt.{sve_type[0]}" } + + - name: svmlslt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-subtract long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + 
return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32], [u32, u16], [u64, u32]] + assert_instr: [["{type_kind[0].su}mlslt", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}mlslt.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqrdmlah[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling multiply-add high + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqrdmlah] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqrdmlah.{sve_type}" } + + - name: svqrdmlah_lane[_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling multiply-add high + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [i16, i32, i64] + assert_instr: [[sqrdmlah, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqrdmlah.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqrdmlsh[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling multiply-subtract high + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqrdmlsh] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqrdmlsh.{sve_type}" } + + - name: svqrdmlsh_lane[_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling multiply-subtract high + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: 
{sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [i16, i32, i64] + assert_instr: [[sqrdmlsh, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqrdmlsh.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqdmlalb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-add long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlalb"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlalb.{sve_type[0]}" } + + - name: svqdmlalb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-add long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32]] + assert_instr: [["sqdmlalb", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqdmlalb.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqdmlalbt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-add long (bottom × top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlalbt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlalbt.{sve_type[0]}" } + + - name: svqdmlalt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-add long (top) + 
arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlalt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlalt.{sve_type[0]}" } + + - name: svqdmlalt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-add long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32]] + assert_instr: [["sqdmlalt", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqdmlalt.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqdmlslb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-subtract long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlslb"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlslb.{sve_type[0]}" } + + - name: svqdmlslb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-subtract long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32]] + assert_instr: [["sqdmlslb", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqdmlslb.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: 
svqdmlslbt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-subtract long (bottom × top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlslbt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlslbt.{sve_type[0]}" } + + - name: svqdmlslt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-subtract long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlslt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlslt.{sve_type[0]}" } + + - name: svqdmlslt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-subtract long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32]] + assert_instr: [["sqdmlslt", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqdmlslt.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqneg[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Saturating negate + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqneg] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "sqneg.{sve_type}" } + + - name: svadclb[{_n}_{type}] + attr: [*sve-unstable] + doc: Add with carry long (bottom) + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [u32, u64] + assert_instr: [adclb] + 
n_variant_op: op3 + compose: + - LLVMLink: { name: "adclb.{sve_type}" } + + - name: svadclt[{_n}_{type}] + attr: [*sve-unstable] + doc: Add with carry long (top) + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [u32, u64] + assert_instr: [adclt] + n_variant_op: op3 + compose: + - LLVMLink: { name: "adclt.{sve_type}" } + + - name: svqadd[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Saturating add + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.su}qadd"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}qadd.{sve_type}" } + + - name: svsqadd[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Saturating add with signed addend + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [u8, i8] + - [u16, i16] + - [u32, i32] + - [u64, i64] + assert_instr: [usqadd] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "usqadd.{sve_type[0]}" } + + - name: svuqadd[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Saturating add with unsigned addend + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + assert_instr: [suqadd] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "suqadd.{sve_type[0]}" } + + - name: svaddlb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}addlb"] + n_variant_op: op2 + compose: + - LLVMLink: { name: 
"{type_kind[0].su}addlb.{sve_type[0]}" } + + - name: svaddlbt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add long (bottom + top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + assert_instr: ["{type_kind[0].su}addlbt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}addlbt.{sve_type[0]}" } + + - name: svaddlt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}addlt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}addlt.{sve_type[0]}" } + + - name: svaddwb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add wide (bottom) + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}addwb"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}addwb.{sve_type[0]}" } + + - name: svaddwt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add wide (top) + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}addwt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}addwt.{sve_type[0]}" } + + - name: svlogb[_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Base 2 logarithm as integer + arguments: + ["inactive: {sve_type[1]}", "pg: {predicate[0]}", "op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: [[f32, i32], [f64, i64]] + assert_instr: [flogb] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "flogb.{sve_type[0]}" } + 
+ - name: svpmul[{_n}_{type}] + attr: [*sve-unstable] + doc: Polynomial multiply + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8] + assert_instr: [pmul] + n_variant_op: op2 + compose: + - LLVMLink: { name: "pmul.{sve_type}" } + + - name: svpmullb_pair[{_n}_{type}] + attr: [*sve-unstable] + doc: Polynomial multiply long (bottom) + target_features: [sve2-aes] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u32, u64] + assert_instr: [pmullb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "pmullb.pair.{sve_type}" } + + - name: svpmullb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Polynomial multiply long (bottom) + target_features: [sve2-aes] + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[u16, u8], [u64, u32]] + assert_instr: [pmullb] + n_variant_op: op2 + compose: + - FnCall: + - "crate::intrinsics::transmute_unchecked" + - [FnCall: ["svpmullb_pair_{type[1]}", [$op1, $op2]]] + - [] + - true + + - name: svpmullt_pair[{_n}_{type}] + attr: [*sve-unstable] + doc: Polynomial multiply long (top) + target_features: [sve2-aes] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u32, u64] + assert_instr: [pmullt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "pmullt.pair.{sve_type}" } + + - name: svpmullt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Polynomial multiply long (top) + target_features: [sve2-aes] + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[u16, u8], [u64, u32]] + assert_instr: [pmullt] + n_variant_op: op2 + compose: + - FnCall: + - "crate::intrinsics::transmute_unchecked" + - [FnCall: ["svpmullt_pair_{type[1]}", [$op1, $op2]]] + - [] + - true + + - name: svaesd[_{type}] + attr: [*sve-unstable] + doc: AES single round decryption + target_features: [sve2-aes] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + 
return_type: "{sve_type}" + types: [u8] + assert_instr: [aesd] + compose: + - LLVMLink: { name: "aesd" } + + - name: svaese[_{type}] + attr: [*sve-unstable] + doc: AES single round encryption + target_features: [sve2-aes] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8] + assert_instr: [aese] + compose: + - LLVMLink: { name: "aese" } + + - name: svaesmc[_{type}] + attr: [*sve-unstable] + doc: AES mix columns + target_features: [sve2-aes] + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [u8] + assert_instr: [aesmc] + compose: + - LLVMLink: { name: "aesmc" } + + - name: svaesimc[_{type}] + attr: [*sve-unstable] + doc: AES inverse mix columns + target_features: [sve2-aes] + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [u8] + assert_instr: [aesimc] + compose: + - LLVMLink: { name: "aesimc" } + + - name: svsm4e[_{type}] + attr: [*sve-unstable] + doc: SM4 encryption and decryption + target_features: [sve2-sm4] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u32] + assert_instr: [sm4e] + compose: + - LLVMLink: { name: "sm4e" } + + - name: svsm4ekey[_{type}] + attr: [*sve-unstable] + doc: SM4 key updates + target_features: [sve2-sm4] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u32] + assert_instr: [sm4ekey] + compose: + - LLVMLink: { name: "sm4ekey" } From a753cf4d77ebb6c39e984200087b1976c1d80c38 Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 15 Jan 2026 16:29:25 +0000 Subject: [PATCH 39/64] core_arch: generated sve intrinsics Following from previous commit, this commit only contains generated code from the SVE intrinsic specifications Co-authored-by: Jamie Cunliffe Co-authored-by: Luca Vizzarro Co-authored-by: Adam Gemmell Co-authored-by: Jacob Bramley --- .../core_arch/src/aarch64/sve/generated.rs | 44957 ++++++++++++++++ .../src/aarch64/sve/ld_st_tests_aarch64.rs | 9345 ++++ 
.../core_arch/src/aarch64/sve2/generated.rs | 23856 ++++++++ .../src/aarch64/sve2/ld_st_tests_aarch64.rs | 2482 + 4 files changed, 80640 insertions(+) create mode 100644 library/stdarch/crates/core_arch/src/aarch64/sve/ld_st_tests_aarch64.rs create mode 100644 library/stdarch/crates/core_arch/src/aarch64/sve2/ld_st_tests_aarch64.rs diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs index 8b137891791fe..6edfc8e159a75 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs @@ -1 +1,44958 @@ +// This code is automatically generated. DO NOT MODIFY. +// +// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file: +// +// ``` +// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec +// ``` +#![allow(improper_ctypes)] +#[cfg(test)] +use stdarch_test::assert_instr; + +use super::*; +use crate::core_arch::arch::aarch64::*; + +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv4f32")] + fn _svabd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svabd_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(fabd))] +pub fn svabd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svabd_f32_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svabd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv2f64")] + fn _svabd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svabd_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svabd_f64_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svabd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv16i8")] + fn _svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svabd_s8_m(pg, op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svabd_s8_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svabd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_m(pg: svbool_t, op1: 
svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv8i16")] + fn _svabd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svabd_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svabd_s16_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svabd_s16_m(pg, svsel_s16(pg, op1, 
svdup_n_s16(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv4i32")] + fn _svabd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svabd_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svabd_s32_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svabd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv2i64")] + fn _svabd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svabd_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svabd_s64_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svabd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn 
svabd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv16i8")] + fn _svabd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svabd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svabd_u8_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + 
svabd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svabd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv8i16")] + fn _svabd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svabd_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_m(pg, op1, 
svdup_n_u16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svabd_u16_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svabd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv4i32")] + fn _svabd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svabd_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svabd_u32_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svabd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv2i64")] + fn _svabd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svabd_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svabd_u64_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svabd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv4f32")] + fn 
_svabs_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svabs_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svabs_f32_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svabs_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv2f64")] + fn _svabs_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svabs_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + 
svabs_f64_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svabs_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv16i8")] + fn _svabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svabs_s8_m(inactive, pg, op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svabs_s8_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svabs_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv8i16")] + fn _svabs_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svabs_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svabs_s16_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svabs_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv4i32")] + fn _svabs_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svabs_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svabs_s32_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svabs_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv2i64")] + fn _svabs_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svabs_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svabs_s64_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svabs_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv4f32")] + fn _svacge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svacge_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacge_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv2f64")] + fn _svacge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svacge_f64(pg.sve_into(), op1, 
op2).sve_into() } +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacge_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facgt.nxv4f32")] + fn _svacgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svacgt_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacgt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern 
"unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facgt.nxv2f64")] + fn _svacgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svacgt_f64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacgt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svacge_f32(pg, op2, op1) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacle_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svacge_f64(pg, 
op2, op1) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacle_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svacgt_f32(pg, op2, op1) +} +#[doc = "Absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svaclt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svacgt_f64(pg, op2, op1) +} +#[doc = "Absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svaclt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv4f32")] + fn _svadd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svadd_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svadd_f32_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_x(pg: svbool_t, op1: 
svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svadd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv2f64")] + fn _svadd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svadd_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svadd_f64_m(pg, op1, svdup_n_f64(op2)) +} 
+#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svadd_f64_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svadd_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svadd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svadd_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_m(pg: 
svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv16i8")] + fn _svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svadd_s8_m(pg, op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svadd_s8_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv8i16")] + fn _svadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svadd_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svadd_s16_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv4i32")] + fn _svadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svadd_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn 
svadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svadd_s32_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv2i64")] + fn _svadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svadd_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svadd_s64_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> 
svint64_t { + svadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svadd_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svadd_u8_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svadd_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_x)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svadd_u16_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svadd_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = 
"Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svadd_u32_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_z(pg: svbool_t, op1: 
svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svadd_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svadd_u64_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Add reduction (strictly-ordered)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadda))] +pub fn svadda_f32(pg: svbool_t, initial: f32, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadda.nxv4f32")] + fn _svadda_f32(pg: svbool4_t, initial: f32, op: svfloat32_t) -> f32; + } + unsafe { _svadda_f32(pg.sve_into(), initial, op) } +} +#[doc = "Add reduction (strictly-ordered)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadda))] +pub fn svadda_f64(pg: svbool_t, initial: f64, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadda.nxv2f64")] + fn _svadda_f64(pg: svbool2_t, initial: f64, op: svfloat64_t) -> f64; + } + unsafe { _svadda_f64(pg.sve_into(), initial, op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(faddv))] +pub fn svaddv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv4f32")] + fn _svaddv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svaddv_f32(pg.sve_into(), op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(faddv))] +pub fn svaddv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv2f64")] + fn _svaddv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svaddv_f64(pg.sve_into(), op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv2i64")] + fn _svaddv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svaddv_s64(pg.sve_into(), op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(uaddv))] +pub fn svaddv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv2i64")] + fn _svaddv_u64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svaddv_u64(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddv))] +pub fn svaddv_s8(pg: svbool_t, op: svint8_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv16i8")] + fn _svaddv_s8(pg: svbool_t, op: svint8_t) -> i64; + } + unsafe { _svaddv_s8(pg, op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddv))] +pub fn svaddv_s16(pg: svbool_t, op: svint16_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv8i16")] + fn _svaddv_s16(pg: svbool8_t, op: svint16_t) -> i64; + } + unsafe { _svaddv_s16(pg.sve_into(), op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddv))] +pub fn svaddv_s32(pg: svbool_t, op: svint32_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv4i32")] + fn _svaddv_s32(pg: svbool4_t, op: svint32_t) -> i64; + } 
+ unsafe { _svaddv_s32(pg.sve_into(), op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u8(pg: svbool_t, op: svuint8_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv16i8")] + fn _svaddv_u8(pg: svbool_t, op: svint8_t) -> i64; + } + unsafe { _svaddv_u8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u16(pg: svbool_t, op: svuint16_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv8i16")] + fn _svaddv_u16(pg: svbool8_t, op: svint16_t) -> i64; + } + unsafe { _svaddv_u16(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u32(pg: svbool_t, op: svuint32_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv4i32")] + fn _svaddv_u32(pg: svbool4_t, op: svint32_t) -> i64; + } + unsafe { _svaddv_u32(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Compute vector addresses for 8-bit data"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u32base]_[s32]offset)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u32base_s32offset(bases: svuint32_t, offsets: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrb.nxv4i32")] + fn _svadrb_u32base_s32offset(bases: svint32_t, offsets: svint32_t) -> svint32_t; + } + unsafe { _svadrb_u32base_s32offset(bases.as_signed(), offsets).as_unsigned() } +} +#[doc = "Compute vector addresses for 16-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u32base]_[s32]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrh.nxv4i32")] + fn _svadrh_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svadrh_u32base_s32index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 32-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u32base]_[s32]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrw.nxv4i32")] + fn _svadrw_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { 
_svadrw_u32base_s32index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 64-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u32base]_[s32]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrd.nxv4i32")] + fn _svadrd_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svadrd_u32base_s32index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 8-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u32base]_[u32]offset)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u32base_u32offset(bases: svuint32_t, offsets: svuint32_t) -> svuint32_t { + unsafe { svadrb_u32base_s32offset(bases, offsets.as_signed()) } +} +#[doc = "Compute vector addresses for 16-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u32base]_[u32]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrh_u32base_s32index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 32-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u32base]_[u32]index)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrw_u32base_s32index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 64-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u32base]_[u32]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrd_u32base_s32index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 8-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u64base]_[s64]offset)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u64base_s64offset(bases: svuint64_t, offsets: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrb.nxv2i64")] + fn _svadrb_u64base_s64offset(bases: svint64_t, offsets: svint64_t) -> svint64_t; + } + unsafe { _svadrb_u64base_s64offset(bases.as_signed(), offsets).as_unsigned() } +} +#[doc = "Compute vector addresses for 16-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u64base]_[s64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", 
link_name = "llvm.aarch64.sve.adrh.nxv2i64")] + fn _svadrh_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svadrh_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 32-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u64base]_[s64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrw.nxv2i64")] + fn _svadrw_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svadrw_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 64-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u64base]_[s64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrd.nxv2i64")] + fn _svadrd_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svadrd_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 8-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u64base]_[u64]offset)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn 
svadrb_u64base_u64offset(bases: svuint64_t, offsets: svuint64_t) -> svuint64_t { + unsafe { svadrb_u64base_s64offset(bases, offsets.as_signed()) } +} +#[doc = "Compute vector addresses for 16-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u64base]_[u64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrh_u64base_s64index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 32-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u64base]_[u64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrw_u64base_s64index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 64-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u64base]_[u64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrd_u64base_s64index(bases, indices.as_signed()) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> 
svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.z.nxv16i1")] + fn _svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svand_b_z(pg, op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv16i8")] + fn _svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svand_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svand_s8_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_x(pg: 
svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svand_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv8i16")] + fn _svand_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svand_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = 
"Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svand_s16_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svand_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn 
svand_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv4i32")] + fn _svand_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svand_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svand_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svand_s32_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svand_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svand_s32_m(pg, svsel_s32(pg, op1, 
svdup_n_s32(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svand_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv2i64")] + fn _svand_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svand_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svand_s64_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svand_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svand_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn 
svand_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svand_u8_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svand_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svand_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svand_u16_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svand_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svand_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svand_u32_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn 
svand_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svand_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svand_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svand_u64_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svand_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s8(pg: 
svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv16i8")] + fn _svandv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svandv_s8(pg, op) } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv8i16")] + fn _svandv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svandv_s16(pg.sve_into(), op) } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv4i32")] + fn _svandv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svandv_s32(pg.sve_into(), op) } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv2i64")] + fn _svandv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { 
_svandv_s64(pg.sve_into(), op) } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svandv_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svandv_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svandv_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svandv_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv16i8")] + fn _svasr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svasr_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svasr_s8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svasr_s8_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svasr_s8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s8_z(pg: 
svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svasr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svasr_s8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv8i16")] + fn _svasr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svasr_s16_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svasr_s16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> 
svint16_t { + svasr_s16_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svasr_s16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svasr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svasr_s16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv4i32")] + fn _svasr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svasr_s32_m(pg.sve_into(), op1, 
op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svasr_s32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svasr_s32_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svasr_s32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svasr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svasr_s32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv2i64")] + fn _svasr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svasr_s64_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svasr_s64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svasr_s64_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(asr))] +pub fn svasr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svasr_s64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svasr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svasr_s64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.asr.wide.nxv16i8" + )] + fn _svasr_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t; + } + unsafe { _svasr_wide_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(asr))] +pub fn svasr_wide_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svasr_wide_s8_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svasr_wide_s8_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svasr_wide_s8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svasr_wide_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svasr_wide_s8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.asr.wide.nxv8i16" + )] + fn _svasr_wide_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t; + } + unsafe { _svasr_wide_s16_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svasr_wide_s16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svasr_wide_s16_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svasr_wide_s16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svasr_wide_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svasr_wide_s16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.asr.wide.nxv4i32" + )] + fn _svasr_wide_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svasr_wide_s32_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svasr_wide_s32_m(pg, op1, 
svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svasr_wide_s32_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svasr_wide_s32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svasr_wide_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svasr_wide_s32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s8_m(pg: svbool_t, op1: svint8_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv16i8")] + fn _svasrd_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t; + } + unsafe { _svasrd_n_s8_m(pg, op1, IMM2) } +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s8_x(pg: svbool_t, op1: svint8_t) -> svint8_t { + svasrd_n_s8_m::(pg, op1) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s8_z(pg: svbool_t, op1: svint8_t) -> svint8_t { + svasrd_n_s8_m::(pg, svsel_s8(pg, op1, svdup_n_s8(0))) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s16_m(pg: svbool_t, op1: svint16_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv8i16")] + fn 
_svasrd_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t; + } + unsafe { _svasrd_n_s16_m(pg.sve_into(), op1, IMM2) } +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s16_x(pg: svbool_t, op1: svint16_t) -> svint16_t { + svasrd_n_s16_m::(pg, op1) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s16_z(pg: svbool_t, op1: svint16_t) -> svint16_t { + svasrd_n_s16_m::(pg, svsel_s16(pg, op1, svdup_n_s16(0))) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s32_m(pg: svbool_t, op1: svint32_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv4i32")] + fn _svasrd_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t; + } + unsafe { _svasrd_n_s32_m(pg.sve_into(), op1, IMM2) } +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s32_x(pg: svbool_t, op1: svint32_t) -> svint32_t { + svasrd_n_s32_m::(pg, op1) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s32_z(pg: svbool_t, op1: svint32_t) -> svint32_t { + svasrd_n_s32_m::(pg, svsel_s32(pg, op1, svdup_n_s32(0))) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s64_m(pg: svbool_t, op1: svint64_t) -> svint64_t { + static_assert_range!(IMM2, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv2i64")] + fn _svasrd_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t; + } + unsafe { _svasrd_n_s64_m(pg.sve_into(), op1, IMM2) } +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s64_x(pg: svbool_t, op1: svint64_t) -> svint64_t { + svasrd_n_s64_m::(pg, op1) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s64_z(pg: svbool_t, op1: svint64_t) -> svint64_t { + svasrd_n_s64_m::(pg, svsel_s64(pg, op1, svdup_n_s64(0))) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.z.nvx16i1")] + fn _svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svbic_b_z(pg, op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv16i8")] + fn _svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svbic_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + 
svbic_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svbic_s8_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svbic_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svbic_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svbic_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(bic))] +pub fn svbic_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv8i16")] + fn _svbic_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svbic_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svbic_s16_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + 
svbic_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv4i32")] + fn _svbic_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svbic_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svbic_s32_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svbic_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv2i64")] + fn _svbic_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svbic_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svbic_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svbic_s64_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svbic_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svbic_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_z(pg: svbool_t, op1: 
svint64_t, op2: i64) -> svint64_t { + svbic_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svbic_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svbic_u8_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svbic_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svbic_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svbic_u16_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svbic_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svbic_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(bic))] +pub fn svbic_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svbic_u32_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svbic_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svbic_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svbic_u64_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svbic_u64_m(pg, svsel_u64(pg, 
op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Break after first true condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(brka))] +pub fn svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.nxv16i1")] + fn _svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svbrka_b_m(inactive, pg, op) } +} +#[doc = "Break after first true condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(brka))] +pub fn svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.z.nxv16i1")] + fn _svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svbrka_b_z(pg, op) } +} +#[doc = "Break before first true condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(brkb))] +pub fn svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.nxv16i1")] + fn _svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svbrkb_b_m(inactive, pg, op) } +} +#[doc = "Break before first true condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(brkb))] +pub fn svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.z.nxv16i1")] + fn _svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svbrkb_b_z(pg, op) } +} +#[doc = "Propagate break to next partition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkn[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(brkn))] +pub fn svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkn.z.nxv16i1")] + fn _svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svbrkn_b_z(pg, op1, op2) } +} +#[doc = "Break after first true condition, propagating from previous partition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpa[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(brkpa))] +pub fn svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> 
svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.brkpa.z.nxv16i1"
+        )]
+        fn _svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svbrkpa_b_z(pg, op1, op2) }
+}
+#[doc = "Break before first true condition, propagating from previous partition"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpb[_b]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(brkpb))]
+pub fn svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.brkpb.z.nxv16i1"
+        )]
+        fn _svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svbrkpb_b_z(pg, op1, op2) }
+}
+#[doc = "Complex add with rotate"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f32_m<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+) -> svfloat32_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv4f32")]
+        fn _svcadd_f32_m(
+            pg: svbool4_t,
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            imm_rotation: i32,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svcadd_f32_m(pg.sve_into(), op1, op2, IMM_ROTATION) }
+}
+#[doc = "Complex add with rotate"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f32_x<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+) -> svfloat32_t {
+    svcadd_f32_m::<IMM_ROTATION>(pg, op1, op2)
+}
+#[doc = "Complex add with rotate"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f32_z<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+) -> svfloat32_t {
+    svcadd_f32_m::<IMM_ROTATION>(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
+}
+#[doc = "Complex add with rotate"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f64_m<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+) -> svfloat64_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv2f64")]
+        fn _svcadd_f64_m(
+            pg: svbool2_t,
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            imm_rotation: i32,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svcadd_f64_m(pg.sve_into(), op1, op2, IMM_ROTATION) }
+}
+#[doc = "Complex add with rotate"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f64_x<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+) -> svfloat64_t {
+    svcadd_f64_m::<IMM_ROTATION>(pg, op1, op2)
+}
+#[doc = "Complex add with rotate"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f64_z<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+) -> svfloat64_t {
+    svcadd_f64_m::<IMM_ROTATION>(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_f32])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_f32(pg: svbool_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv4f32")]
+        fn _svclasta_f32(pg: svbool4_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svclasta_f32(pg.sve_into(), fallback, data) }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_f64])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_f64(pg: svbool_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv2f64")]
+        fn _svclasta_f64(pg: svbool2_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svclasta_f64(pg.sve_into(), fallback, data) }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv16i8")] + fn _svclasta_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t; + } + unsafe { _svclasta_s8(pg, fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_s16(pg: svbool_t, fallback: svint16_t, data: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv8i16")] + fn _svclasta_s16(pg: svbool8_t, fallback: svint16_t, data: svint16_t) -> svint16_t; + } + unsafe { _svclasta_s16(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_s32(pg: svbool_t, fallback: svint32_t, data: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv4i32")] + fn _svclasta_s32(pg: svbool4_t, fallback: svint32_t, data: svint32_t) -> svint32_t; + } + unsafe { _svclasta_s32(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract 
element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_s64(pg: svbool_t, fallback: svint64_t, data: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv2i64")] + fn _svclasta_s64(pg: svbool2_t, fallback: svint64_t, data: svint64_t) -> svint64_t; + } + unsafe { _svclasta_s64(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_u8(pg: svbool_t, fallback: svuint8_t, data: svuint8_t) -> svuint8_t { + unsafe { svclasta_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_u16(pg: svbool_t, fallback: svuint16_t, data: svuint16_t) -> svuint16_t { + unsafe { svclasta_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub 
fn svclasta_u32(pg: svbool_t, fallback: svuint32_t, data: svuint32_t) -> svuint32_t { + unsafe { svclasta_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_u64(pg: svbool_t, fallback: svuint64_t, data: svuint64_t) -> svuint64_t { + unsafe { svclasta_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_f32(pg: svbool_t, fallback: f32, data: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv4f32" + )] + fn _svclasta_n_f32(pg: svbool4_t, fallback: f32, data: svfloat32_t) -> f32; + } + unsafe { _svclasta_n_f32(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_f64(pg: svbool_t, fallback: f64, data: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv2f64" + )] + fn _svclasta_n_f64(pg: svbool2_t, fallback: f64, data: svfloat64_t) -> f64; + } + unsafe { 
_svclasta_n_f64(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv16i8" + )] + fn _svclasta_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8; + } + unsafe { _svclasta_n_s8(pg, fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_s16(pg: svbool_t, fallback: i16, data: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv8i16" + )] + fn _svclasta_n_s16(pg: svbool8_t, fallback: i16, data: svint16_t) -> i16; + } + unsafe { _svclasta_n_s16(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_s32(pg: svbool_t, fallback: i32, data: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv4i32" + )] + fn _svclasta_n_s32(pg: svbool4_t, fallback: i32, data: svint32_t) -> 
i32;
    }
    unsafe { _svclasta_n_s32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_s64(pg: svbool_t, fallback: i64, data: svint64_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clasta.n.nxv2i64"
        )]
        fn _svclasta_n_s64(pg: svbool2_t, fallback: i64, data: svint64_t) -> i64;
    }
    unsafe { _svclasta_n_s64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_u8(pg: svbool_t, fallback: u8, data: svuint8_t) -> u8 {
    // Unsigned form reuses the signed intrinsic; bit pattern is unchanged.
    unsafe { svclasta_n_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_u16(pg: svbool_t, fallback: u16, data: svuint16_t) -> u16 {
    unsafe { svclasta_n_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_u32(pg: svbool_t, fallback: u32, data: svuint32_t) -> u32 {
    unsafe { svclasta_n_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_u64(pg: svbool_t, fallback: u64, data: svuint64_t) -> u64 {
    unsafe { svclasta_n_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_f32(pg: svbool_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv4f32")]
        fn _svclastb_f32(pg: svbool4_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t;
    }
    unsafe { _svclastb_f32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_f64(pg: svbool_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv2f64")]
        fn _svclastb_f64(pg: svbool2_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svclastb_f64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv16i8")]
        fn _svclastb_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t;
    }
    unsafe { _svclastb_s8(pg, fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_s16(pg: svbool_t, fallback: svint16_t, data: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv8i16")]
        fn _svclastb_s16(pg: svbool8_t, fallback: svint16_t, data: svint16_t) -> svint16_t;
    }
    unsafe { _svclastb_s16(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_s32(pg: svbool_t, fallback: svint32_t, data: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv4i32")]
        fn _svclastb_s32(pg: svbool4_t, fallback: svint32_t, data: svint32_t) -> svint32_t;
    }
    unsafe { _svclastb_s32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_s64(pg: svbool_t, fallback: svint64_t, data: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv2i64")]
        fn _svclastb_s64(pg: svbool2_t, fallback: svint64_t, data: svint64_t) -> svint64_t;
    }
    unsafe { _svclastb_s64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_u8(pg: svbool_t, fallback: svuint8_t, data: svuint8_t) -> svuint8_t {
    unsafe { svclastb_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_u16(pg: svbool_t, fallback: svuint16_t, data: svuint16_t) -> svuint16_t {
    unsafe { svclastb_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_u32(pg: svbool_t, fallback: svuint32_t, data: svuint32_t) -> svuint32_t {
    unsafe { svclastb_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_u64(pg: svbool_t, fallback: svuint64_t, data: svuint64_t) -> svuint64_t {
    unsafe { svclastb_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_f32(pg: svbool_t, fallback: f32, data: svfloat32_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv4f32"
        )]
        fn _svclastb_n_f32(pg: svbool4_t, fallback: f32, data: svfloat32_t) -> f32;
    }
    unsafe { _svclastb_n_f32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_f64(pg: svbool_t, fallback: f64,
data: svfloat64_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv2f64"
        )]
        fn _svclastb_n_f64(pg: svbool2_t, fallback: f64, data: svfloat64_t) -> f64;
    }
    unsafe { _svclastb_n_f64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv16i8"
        )]
        fn _svclastb_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8;
    }
    unsafe { _svclastb_n_s8(pg, fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_s16(pg: svbool_t, fallback: i16, data: svint16_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv8i16"
        )]
        fn _svclastb_n_s16(pg: svbool8_t, fallback: i16, data: svint16_t) -> i16;
    }
    unsafe { _svclastb_n_s16(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_s32(pg: svbool_t, fallback: i32, data: svint32_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv4i32"
        )]
        fn _svclastb_n_s32(pg: svbool4_t, fallback: i32, data: svint32_t) -> i32;
    }
    unsafe { _svclastb_n_s32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_s64(pg: svbool_t, fallback: i64, data: svint64_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv2i64"
        )]
        fn _svclastb_n_s64(pg: svbool2_t, fallback: i64, data: svint64_t) -> i64;
    }
    unsafe { _svclastb_n_s64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_u8(pg: svbool_t, fallback: u8, data: svuint8_t) -> u8 {
    // Unsigned forms delegate to the signed intrinsics; bit pattern is unchanged.
    unsafe { svclastb_n_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_u16(pg: svbool_t, fallback: u16, data: svuint16_t) -> u16 {
    unsafe { svclastb_n_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_u32(pg: svbool_t, fallback: u32, data: svuint32_t) -> u32 {
    unsafe { svclastb_n_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_u64(pg: svbool_t, fallback: u64, data: svuint64_t) -> u64 {
    unsafe { svclastb_n_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv16i8")]
        fn _svcls_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
    }
    unsafe { _svcls_s8_m(inactive.as_signed(), pg, op).as_unsigned() }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t {
    unsafe { svcls_s8_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t {
    svcls_s8_m(svdup_n_u8(0), pg, op)
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv8i16")]
        fn _svcls_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    unsafe { _svcls_s16_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t {
    unsafe { svcls_s16_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t {
    svcls_s16_m(svdup_n_u16(0), pg, op)
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv4i32")]
        fn _svcls_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    unsafe { _svcls_s32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t {
    unsafe { svcls_s32_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t {
    svcls_s32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name =
"llvm.aarch64.sve.cls.nxv2i64")] + fn _svcls_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcls_s64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe { svcls_s64_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t { + svcls_s64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv16i8")] + fn _svclz_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svclz_s8_m(inactive.as_signed(), pg, op).as_unsigned() } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(clz))] +pub fn svclz_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe { svclz_s8_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t { + svclz_s8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv8i16")] + fn _svclz_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svclz_s16_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe { svclz_s16_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s16_z(pg: svbool_t, op: 
svint16_t) -> svuint16_t { + svclz_s16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv4i32")] + fn _svclz_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svclz_s32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe { svclz_s32_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t { + svclz_s32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe 
extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv2i64")] + fn _svclz_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svclz_s64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe { svclz_s64_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t { + svclz_s64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svclz_s8_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svclz_u8_m(op, pg, op) +} +#[doc = "Count leading zero 
bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svclz_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svclz_s16_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svclz_u16_m(op, pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svclz_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u32_m(inactive: 
svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svclz_s32_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svclz_u32_m(op, pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svclz_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svclz_s64_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svclz_u64_m(op, pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svclz_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv4f32")] + fn _svcmla_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + imm_rotation: i32, + ) -> svfloat32_t; + } + unsafe { _svcmla_f32_m(pg.sve_into(), op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svcmla_f32_m::(pg, op1, op2, op3) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f32_z( + pg: svbool_t, + op1: 
svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // Zeroing variant: inactive lanes of `op1` are replaced by 0.0 before
    // delegating to the merging form.
    svcmla_f32_m::<IMM_ROTATION>(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
pub fn svcmla_f64_m<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // Only the four quadrant rotations encodable by FCMLA are accepted.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv2f64")]
        fn _svcmla_f64_m(
            pg: svbool2_t,
            op1: svfloat64_t,
            op2: svfloat64_t,
            op3: svfloat64_t,
            imm_rotation: i32,
        ) -> svfloat64_t;
    }
    unsafe { _svcmla_f64_m(pg.sve_into(), op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
pub fn svcmla_f64_x<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // "Don't care" variant: implemented here as the merging form.
    svcmla_f64_m::<IMM_ROTATION>(pg, op1, op2, op3)
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
pub fn svcmla_f64_z<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // Zeroing variant: inactive lanes of `op1` are replaced by 0.0 before
    // delegating to the merging form.
    svcmla_f64_m::<IMM_ROTATION>(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmla, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svcmla_lane_f32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.fcmla.lane.x.nxv4f32"
        )]
        fn _svcmla_lane_f32(
            op1: svfloat32_t,
            op2: svfloat32_t,
            op3: svfloat32_t,
            imm_index: i32,
            imm_rotation: i32,
        ) -> svfloat32_t;
    }
    unsafe { _svcmla_lane_f32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmeq))]
pub fn svcmpeq_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv4f32")]
        fn _svcmpeq_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
    }
    unsafe { _svcmpeq_f32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmeq))]
pub fn svcmpeq_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
    // Scalar form: broadcast `op2` and reuse the vector implementation.
    svcmpeq_f32(pg, op1, svdup_n_f32(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmeq))]
pub fn svcmpeq_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv2f64")]
        fn _svcmpeq_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
    }
    unsafe { _svcmpeq_f64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmeq))]
pub fn svcmpeq_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
    svcmpeq_f64(pg, op1, svdup_n_f64(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv16i8")]
        fn _svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
    }
    unsafe { _svcmpeq_s8(pg, op1, op2) }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t {
    svcmpeq_s8(pg, op1, svdup_n_s8(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv8i16")]
        fn _svcmpeq_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
    }
    unsafe { _svcmpeq_s16(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t {
    svcmpeq_s16(pg, op1, svdup_n_s16(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv4i32")]
        fn _svcmpeq_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
    }
    unsafe { _svcmpeq_s32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t {
    // Scalar form: broadcast `op2` and reuse the vector implementation.
    svcmpeq_s32(pg, op1, svdup_n_s32(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv2i64")]
        fn _svcmpeq_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t;
    }
    unsafe { _svcmpeq_s64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t {
    svcmpeq_s64(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
    // Reinterpret as signed and reuse the signed implementation (same CMPEQ).
    unsafe { svcmpeq_s8(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t {
    svcmpeq_u8(pg, op1, svdup_n_u8(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
    unsafe { svcmpeq_s16(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t {
    svcmpeq_u16(pg, op1, svdup_n_u16(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t {
    unsafe { svcmpeq_s32(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t {
    svcmpeq_u32(pg, op1, svdup_n_u32(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t {
    unsafe { svcmpeq_s64(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t {
    svcmpeq_u64(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t {
    // "wide" forms compare narrow elements against 64-bit elements of `op2`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpeq.wide.nxv16i8"
        )]
        fn _svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
    }
    unsafe { _svcmpeq_wide_s8(pg, op1, op2) }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t {
    svcmpeq_wide_s8(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpeq.wide.nxv8i16"
        )]
        fn _svcmpeq_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
    }
    unsafe { _svcmpeq_wide_s16(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t {
    svcmpeq_wide_s16(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpeq.wide.nxv4i32"
        )]
        fn _svcmpeq_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
    }
    unsafe { _svcmpeq_wide_s32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t {
    svcmpeq_wide_s32(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmge))]
pub fn svcmpge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv4f32")]
        fn _svcmpge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
    }
    unsafe { _svcmpge_f32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmge))]
pub fn svcmpge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
    svcmpge_f32(pg, op1, svdup_n_f32(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmge))]
pub fn svcmpge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv2f64")]
        fn _svcmpge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
    }
    unsafe { _svcmpge_f64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmge))]
pub fn svcmpge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
    svcmpge_f64(pg, op1, svdup_n_f64(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv16i8")]
        fn _svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
    }
    unsafe { _svcmpge_s8(pg, op1, op2) }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t {
    svcmpge_s8(pg, op1, svdup_n_s8(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv8i16")]
        fn _svcmpge_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
    }
    unsafe { _svcmpge_s16(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t {
    svcmpge_s16(pg, op1, svdup_n_s16(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv4i32")]
        fn _svcmpge_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
    }
    unsafe { _svcmpge_s32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t {
    svcmpge_s32(pg, op1, svdup_n_s32(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv2i64")]
        fn _svcmpge_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t;
    }
    unsafe { _svcmpge_s64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t {
    svcmpge_s64(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
    // Unsigned >= maps to the CMPHS (higher-or-same) intrinsic; arguments are
    // bit-reinterpreted to match the signed FFI signature.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv16i8")]
        fn _svcmpge_u8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
    }
    unsafe { _svcmpge_u8(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t {
    svcmpge_u8(pg, op1, svdup_n_u8(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv8i16")]
        fn _svcmpge_u16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
    }
    unsafe { _svcmpge_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t {
    svcmpge_u16(pg, op1, svdup_n_u16(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv4i32")]
        fn _svcmpge_u32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
    }
    unsafe { _svcmpge_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t {
    svcmpge_u32(pg, op1, svdup_n_u32(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv2i64")]
        fn _svcmpge_u64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t;
    }
    unsafe { _svcmpge_u64(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t {
    svcmpge_u64(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpge.wide.nxv16i8"
        )]
        fn _svcmpge_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
    }
    unsafe { _svcmpge_wide_s8(pg, op1, op2) }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t {
    svcmpge_wide_s8(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpge.wide.nxv8i16"
        )]
        fn _svcmpge_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
    }
    unsafe { _svcmpge_wide_s16(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t {
    svcmpge_wide_s16(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpge.wide.nxv4i32"
        )]
        fn _svcmpge_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
    }
    unsafe { _svcmpge_wide_s32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t {
    svcmpge_wide_s32(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmphs.wide.nxv16i8"
        )]
        fn _svcmpge_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
    }
    unsafe { _svcmpge_wide_u8(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t {
    svcmpge_wide_u8(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmphs.wide.nxv8i16"
        )]
        fn _svcmpge_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
    }
    unsafe { _svcmpge_wide_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t {
    svcmpge_wide_u16(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmphs.wide.nxv4i32"
        )]
        fn _svcmpge_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
    }
    unsafe { _svcmpge_wide_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t {
    svcmpge_wide_u32(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmgt))]
pub fn svcmpgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv4f32")]
        fn _svcmpgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
    }
    unsafe { _svcmpgt_f32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmgt))]
pub fn svcmpgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
    svcmpgt_f32(pg, op1, svdup_n_f32(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmgt))]
pub fn svcmpgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv2f64")]
        fn _svcmpgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
    }
    unsafe { _svcmpgt_f64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmgt))]
pub fn svcmpgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
    svcmpgt_f64(pg, op1, svdup_n_f64(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv16i8")]
        fn _svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
    }
    unsafe { _svcmpgt_s8(pg, op1, op2) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t {
    svcmpgt_s8(pg, op1, svdup_n_s8(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv8i16")]
        fn _svcmpgt_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
    }
    unsafe { _svcmpgt_s16(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t {
    svcmpgt_s16(pg, op1, svdup_n_s16(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv4i32")]
        fn _svcmpgt_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
    }
    unsafe { _svcmpgt_s32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t {
    svcmpgt_s32(pg, op1, svdup_n_s32(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv2i64")]
        fn _svcmpgt_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t;
    }
    unsafe { _svcmpgt_s64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t {
    svcmpgt_s64(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
    // Unsigned > maps to the CMPHI (higher) intrinsic; arguments are
    // bit-reinterpreted to match the signed FFI signature.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv16i8")]
        fn _svcmpgt_u8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
    }
    unsafe { _svcmpgt_u8(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t {
    svcmpgt_u8(pg, op1, svdup_n_u8(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv8i16")]
        fn _svcmpgt_u16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
    }
    unsafe { _svcmpgt_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t {
    svcmpgt_u16(pg, op1, svdup_n_u16(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv4i32")]
        fn _svcmpgt_u32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
    }
    unsafe {
_svcmpgt_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpgt_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv2i64")] + fn _svcmpgt_u64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpgt_u64(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpgt_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) 
-> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpgt.wide.nxv16i8" + )] + fn _svcmpgt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpgt_wide_s8(pg, op1, op2) } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmpgt_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpgt.wide.nxv8i16" + )] + fn _svcmpgt_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpgt_wide_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmpgt_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s32])"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpgt.wide.nxv4i32" + )] + fn _svcmpgt_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpgt_wide_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmpgt_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphi.wide.nxv16i8" + )] + fn _svcmpgt_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpgt_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_n_u8(pg: 
svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmpgt_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphi.wide.nxv8i16" + )] + fn _svcmpgt_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpgt_wide_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmpgt_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphi.wide.nxv4i32" + )] + fn _svcmpgt_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpgt_wide_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmpgt_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svcmpge_f32(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmple_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svcmpge_f64(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmple_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + svcmpge_s8(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmple_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + svcmpge_s16(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmple_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + svcmpge_s32(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmple_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + svcmpge_s64(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmple_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn 
svcmple_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + svcmpge_u8(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmple_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + svcmpge_u16(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmple_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + svcmpge_u32(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u32])"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmple_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + svcmpge_u64(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmple_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmple.wide.nxv16i8" + )] + fn _svcmple_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmple_wide_s8(pg, op1, op2) } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s8])"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmple_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmple.wide.nxv8i16" + )] + fn _svcmple_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmple_wide_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmple_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmple.wide.nxv4i32" + )] + fn _svcmple_wide_s32(pg: svbool4_t, op1: 
svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmple_wide_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmple_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpls.wide.nxv16i8" + )] + fn _svcmple_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmple_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmple_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpls.wide.nxv8i16" + )] + fn _svcmple_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmple_wide_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmple_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpls.wide.nxv4i32" + )] + fn _svcmple_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmple_wide_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: 
u64) -> svbool_t { + svcmple_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svcmpgt_f32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmplt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svcmpgt_f64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmplt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + svcmpgt_s8(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmplt_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + svcmpgt_s16(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmplt_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + svcmpgt_s32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmplt_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + svcmpgt_s64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmplt_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + svcmpgt_u8(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u8(pg: svbool_t, op1: svuint8_t, op2: 
u8) -> svbool_t { + svcmplt_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + svcmpgt_u16(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmplt_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + svcmpgt_u32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmplt_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + svcmpgt_u64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmplt_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplt.wide.nxv16i8" + )] + fn _svcmplt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmplt_wide_s8(pg, op1, op2) } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmplt_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] 
+pub fn svcmplt_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplt.wide.nxv8i16" + )] + fn _svcmplt_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmplt_wide_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmplt_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplt.wide.nxv4i32" + )] + fn _svcmplt_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmplt_wide_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmplt_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplo.wide.nxv16i8" + )] + fn _svcmplt_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmplt_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmplt_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplo.wide.nxv8i16" + )] + fn _svcmplt_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmplt_wide_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmplt_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplo.wide.nxv4i32" + )] + fn _svcmplt_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmplt_wide_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmplt_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv4f32")] + fn _svcmpne_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { 
_svcmpne_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpne_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv2f64")] + fn _svcmpne_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpne_f64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpne_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv16i8")] + fn _svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpne_s8(pg, op1, op2) } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpne_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv8i16")] + fn _svcmpne_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpne_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpne_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(cmpne))] +pub fn svcmpne_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv4i32")] + fn _svcmpne_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpne_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpne_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv2i64")] + fn _svcmpne_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpne_s64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpne_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmpne_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpne_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmpne_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpne_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(cmpne))] +pub fn svcmpne_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmpne_s32(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpne_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmpne_s64(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpne_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.cmpne.wide.nxv16i8" + )] + fn _svcmpne_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpne_wide_s8(pg, op1, op2) } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmpne_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpne.wide.nxv8i16" + )] + fn _svcmpne_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpne_wide_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmpne_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpne.wide.nxv4i32" + )] + fn _svcmpne_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpne_wide_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmpne_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare unordered with"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpuo.nxv4f32")] + fn _svcmpuo_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpuo_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare unordered with"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpuo_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare 
unordered with"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpuo.nxv2f64")] + fn _svcmpuo_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpuo_f64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare unordered with"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpuo_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv16i8")] + fn _svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svcnot_s8_m(inactive, pg, op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svcnot_s8_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svcnot_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv8i16")] + fn _svcnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svcnot_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svcnot_s16_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_z(pg: 
svbool_t, op: svint16_t) -> svint16_t { + svcnot_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv4i32")] + fn _svcnot_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcnot_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svcnot_s32_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svcnot_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> 
svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv2i64")] + fn _svcnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcnot_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svcnot_s64_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svcnot_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svcnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + 
svcnot_u8_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnot_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svcnot_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnot_u16_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnot_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_m)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnot_u32_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnot_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn 
svcnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnot_u64_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnot_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f32_m(inactive: svuint32_t, pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv4f32")] + fn _svcnt_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svcnt_f32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe { svcnt_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f32_z(pg: svbool_t, op: svfloat32_t) -> 
svuint32_t { + svcnt_f32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f64_m(inactive: svuint64_t, pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv2f64")] + fn _svcnt_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svcnt_f64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe { svcnt_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + svcnt_f64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv16i8")] + fn _svcnt_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svcnt_s8_m(inactive.as_signed(), pg, op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe { svcnt_s8_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t { + svcnt_s8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv8i16")] + fn _svcnt_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svcnt_s16_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe { svcnt_s16_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t { + svcnt_s16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv4i32")] + fn _svcnt_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcnt_s32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe { svcnt_s32_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s32_z(pg: 
svbool_t, op: svint32_t) -> svuint32_t { + svcnt_s32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv2i64")] + fn _svcnt_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcnt_s64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe { svcnt_s64_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t { + svcnt_s64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { 
svcnt_s8_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnt_u8_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnt_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svcnt_s16_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnt_u16_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] 
+pub fn svcnt_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnt_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcnt_s32_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnt_u32_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnt_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcnt_s64_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_x)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnt_u64_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnt_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count the number of 8-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rdvl))] +pub fn svcntb() -> u64 { + svcntb_pat::<{ svpattern::SV_ALL }>() +} +#[doc = "Count the number of 16-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnth))] +pub fn svcnth() -> u64 { + svcnth_pat::<{ svpattern::SV_ALL }>() +} +#[doc = "Count the number of 32-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntw)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svcntw() -> u64 { + svcntw_pat::<{ svpattern::SV_ALL }>() +} +#[doc = "Count the number of 64-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svcntd() -> u64 { + svcntd_pat::<{ svpattern::SV_ALL }>() +} +#[doc = "Count the number of 8-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb_pat)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (rdvl , PATTERN = { svpattern :: SV_ALL }))] +# [cfg_attr (test , assert_instr (cntb , PATTERN = { svpattern :: SV_MUL4 }))] +pub fn svcntb_pat() -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntb")] + fn _svcntb_pat(pattern: svpattern) -> i64; + } + unsafe { _svcntb_pat(PATTERN).as_unsigned() } +} +#[doc = "Count the number of 16-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth_pat)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (cnth , PATTERN = { svpattern :: SV_ALL }))] +pub fn svcnth_pat() -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnth")] + fn _svcnth_pat(pattern: svpattern) -> i64; + } + unsafe { _svcnth_pat(PATTERN).as_unsigned() } +} +#[doc = "Count the number of 32-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntw_pat)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (cntw , PATTERN = { svpattern :: SV_ALL }))] +pub fn svcntw_pat() -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", 
link_name = "llvm.aarch64.sve.cntw")] + fn _svcntw_pat(pattern: svpattern) -> i64; + } + unsafe { _svcntw_pat(PATTERN).as_unsigned() } +} +#[doc = "Count the number of 64-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd_pat)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (cntd , PATTERN = { svpattern :: SV_ALL }))] +pub fn svcntd_pat() -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntd")] + fn _svcntd_pat(pattern: svpattern) -> i64; + } + unsafe { _svcntd_pat(PATTERN).as_unsigned() } +} +#[doc = "Count set predicate bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b8(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv16i1")] + fn _svcntp_b8(pg: svbool_t, op: svbool_t) -> i64; + } + unsafe { _svcntp_b8(pg, op).as_unsigned() } +} +#[doc = "Count set predicate bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b16(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv8i1")] + fn _svcntp_b16(pg: svbool8_t, op: svbool8_t) -> i64; + } + unsafe { _svcntp_b16(pg.sve_into(), op.sve_into()).as_unsigned() } +} +#[doc = "Count set predicate bits"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b32(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv4i1")] + fn _svcntp_b32(pg: svbool4_t, op: svbool4_t) -> i64; + } + unsafe { _svcntp_b32(pg.sve_into(), op.sve_into()).as_unsigned() } +} +#[doc = "Count set predicate bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b64(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv2i1")] + fn _svcntp_b64(pg: svbool2_t, op: svbool2_t) -> i64; + } + unsafe { _svcntp_b64(pg.sve_into(), op.sve_into()).as_unsigned() } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_f32(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv4f32" + )] + fn _svcompact_f32(pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svcompact_f32(pg.sve_into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_f64(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv2f64" + )] + fn _svcompact_f64(pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svcompact_f64(pg.sve_into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_s32(pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv4i32" + )] + fn _svcompact_s32(pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcompact_s32(pg.sve_into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_s64(pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv2i64" + )] + fn _svcompact_s64(pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcompact_s64(pg.sve_into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcompact_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcompact_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_f32(x0: svfloat32_t, x1: svfloat32_t) -> svfloat32x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_f64(x0: svfloat64_t, x1: svfloat64_t) -> svfloat64x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_s8(x0: svint8_t, x1: svint8_t) -> svint8x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_s16(x0: svint16_t, x1: svint16_t) -> svint16x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_s32(x0: svint32_t, x1: svint32_t) -> svint32x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_s64(x0: svint64_t, x1: svint64_t) -> svint64x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_u8(x0: svuint8_t, x1: svuint8_t) -> svuint8x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_u16(x0: svuint16_t, x1: svuint16_t) -> svuint16x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_u32(x0: svuint32_t, x1: svuint32_t) -> svuint32x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_u64(x0: svuint64_t, x1: svuint64_t) -> svuint64x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_f32(x0: svfloat32_t, x1: svfloat32_t, x2: svfloat32_t) -> svfloat32x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_f64(x0: svfloat64_t, 
x1: svfloat64_t, x2: svfloat64_t) -> svfloat64x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t) -> svint8x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t) -> svint16x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t) -> svint32x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t) -> svint64x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t) -> svuint8x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_u16(x0: svuint16_t, x1: svuint16_t, x2: svuint16_t) -> svuint16x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_u32(x0: svuint32_t, x1: svuint32_t, x2: svuint32_t) -> svuint32x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_u64(x0: svuint64_t, x1: svuint64_t, x2: svuint64_t) -> svuint64x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue 
= "145052")] +pub fn svcreate4_f32( + x0: svfloat32_t, + x1: svfloat32_t, + x2: svfloat32_t, + x3: svfloat32_t, +) -> svfloat32x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_f64( + x0: svfloat64_t, + x1: svfloat64_t, + x2: svfloat64_t, + x3: svfloat64_t, +) -> svfloat64x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t, x3: svint8_t) -> svint8x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t, x3: svint16_t) -> svint16x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t, x3: svint32_t) -> 
svint32x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t, x3: svint64_t) -> svint64x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t, x3: svuint8_t) -> svuint8x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_u16( + x0: svuint16_t, + x1: svuint16_t, + x2: svuint16_t, + x3: svuint16_t, +) -> svuint16x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_u32( + x0: svuint32_t, + x1: svuint32_t, + x2: svuint32_t, + x3: svuint32_t, +) -> svuint32x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = 
"Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_u64( + x0: svuint64_t, + x1: svuint64_t, + x2: svuint64_t, + x3: svuint64_t, +) -> svuint64x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f32_f64_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvt.f32f64")] + fn _svcvt_f32_f64_m(inactive: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f32_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe { svcvt_f32_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f32_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + 
svcvt_f32_f64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f64_f32_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvt.f64f32")] + fn _svcvt_f64_f32_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f64_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + unsafe { svcvt_f64_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f64_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + svcvt_f64_f32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool_t, op: svint32_t) 
-> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f32i32")] + fn _svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_x(pg: svbool_t, op: svint32_t) -> svfloat32_t { + unsafe { svcvt_f32_s32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_z(pg: svbool_t, op: svint32_t) -> svfloat32_t { + svcvt_f32_s32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool_t, op: svint64_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f32i64")] + fn _svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_x(pg: svbool_t, op: svint64_t) -> svfloat32_t { + unsafe { svcvt_f32_s64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_z(pg: svbool_t, op: svint64_t) -> svfloat32_t { + svcvt_f32_s64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool_t, op: svuint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f32i32")] + fn _svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_u32_m(inactive, pg.sve_into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat32_t { + unsafe { svcvt_f32_u32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point 
convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat32_t { + svcvt_f32_u32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool_t, op: svuint64_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f32i64")] + fn _svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_u64_m(inactive, pg.sve_into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat32_t { + unsafe { svcvt_f32_u64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat32_t { + svcvt_f32_u64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = 
"Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool_t, op: svint32_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f64i32")] + fn _svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_x(pg: svbool_t, op: svint32_t) -> svfloat64_t { + unsafe { svcvt_f64_s32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_z(pg: svbool_t, op: svint32_t) -> svfloat64_t { + svcvt_f64_s32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool_t, op: svint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f64i64")] + fn _svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_x(pg: svbool_t, op: svint64_t) -> svfloat64_t { + unsafe { svcvt_f64_s64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_z(pg: svbool_t, op: svint64_t) -> svfloat64_t { + svcvt_f64_s64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool_t, op: svuint32_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f64i32")] + fn _svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_u32_m(inactive, pg.sve_into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_x)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat64_t { + unsafe { svcvt_f64_u32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat64_t { + svcvt_f64_u32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool_t, op: svuint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f64i64")] + fn _svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_u64_m(inactive, pg.sve_into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat64_t { + unsafe { svcvt_f64_u64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat64_t { + svcvt_f64_u64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f32_m(inactive: svint32_t, pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i32f32")] + fn _svcvt_s32_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svcvt_s32_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f32_x(pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe { svcvt_s32_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f32_z(pg: svbool_t, op: svfloat32_t) -> svint32_t { + svcvt_s32_f32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f64_m(inactive: svint32_t, pg: svbool_t, op: svfloat64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i32f64")] + fn _svcvt_s32_f64_m(inactive: svint32_t, pg: svbool2_t, op: svfloat64_t) -> svint32_t; + } + unsafe { _svcvt_s32_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f64_x(pg: svbool_t, op: svfloat64_t) -> svint32_t { + unsafe { svcvt_s32_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f64_z(pg: svbool_t, op: svfloat64_t) -> svint32_t { + svcvt_s32_f64_m(svdup_n_s32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f32_m(inactive: svint64_t, pg: svbool_t, op: svfloat32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.fcvtzs.i64f32")] + fn _svcvt_s64_f32_m(inactive: svint64_t, pg: svbool2_t, op: svfloat32_t) -> svint64_t; + } + unsafe { _svcvt_s64_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f32_x(pg: svbool_t, op: svfloat32_t) -> svint64_t { + unsafe { svcvt_s64_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f32_z(pg: svbool_t, op: svfloat32_t) -> svint64_t { + svcvt_s64_f32_m(svdup_n_s64(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f64_m(inactive: svint64_t, pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i64f64")] + fn _svcvt_s64_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svcvt_s64_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f64_x(pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe { svcvt_s64_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f64_z(pg: svbool_t, op: svfloat64_t) -> svint64_t { + svcvt_s64_f64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f32_m(inactive: svuint32_t, pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i32f32")] + fn _svcvt_u32_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svcvt_u32_f32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe { svcvt_u32_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + svcvt_u32_f32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f64_m(inactive: svuint32_t, pg: svbool_t, op: svfloat64_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i32f64")] + fn _svcvt_u32_f64_m(inactive: svint32_t, pg: svbool2_t, op: svfloat64_t) -> svint32_t; + } + unsafe { _svcvt_u32_f64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint32_t { + unsafe { svcvt_u32_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint32_t { + svcvt_u32_f64_m(svdup_n_u32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_m)"] 
// NOTE(review): machine-generated SVE intrinsic wrappers (stdarch). Suffix
// convention used throughout this file:
//   `_m` — merging: inactive lanes come from the first vector argument;
//   `_x` — "don't care": inactive lanes are unspecified;
//   `_z` — zeroing: inactive lanes are set to zero;
//   `_n_` — scalar second operand, splatted across the vector with `svdup_n_*`.
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzu))]
pub fn svcvt_u64_f32_m(inactive: svuint64_t, pg: svbool_t, op: svfloat32_t) -> svuint64_t {
    // Direct binding to the LLVM intrinsic. The intrinsic is declared on
    // signed integer vectors, so the unsigned argument/result are reinterpreted
    // bit-for-bit via `as_signed`/`as_unsigned`; `sve_into` converts the
    // all-purpose predicate to the 64-bit-element predicate type (svbool2_t)
    // the intrinsic expects.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i64f32")]
        fn _svcvt_u64_f32_m(inactive: svint64_t, pg: svbool2_t, op: svfloat32_t) -> svint64_t;
    }
    unsafe { _svcvt_u64_f32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzu))]
pub fn svcvt_u64_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint64_t {
    // `_x`: inactive lanes are "don't care", so `op` itself (bit-reinterpreted
    // to the result type) is reused as the merge source rather than
    // materialising a separate value.
    unsafe { svcvt_u64_f32_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzu))]
pub fn svcvt_u64_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint64_t {
    // `_z`: a zero merge source clears the inactive lanes.
    svcvt_u64_f32_m(svdup_n_u64(0), pg, op)
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzu))]
pub fn svcvt_u64_f64_m(inactive: svuint64_t, pg: svbool_t, op: svfloat64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i64f64")]
        fn _svcvt_u64_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t;
    }
    unsafe { _svcvt_u64_f64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzu))]
pub fn svcvt_u64_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint64_t {
    unsafe { svcvt_u64_f64_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzu))]
pub fn svcvt_u64_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint64_t {
    svcvt_u64_f64_m(svdup_n_u64(0), pg, op)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // `svbool4_t` is the 32-bit-element predicate type for the nxv4f32 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdiv.nxv4f32")]
        fn _svdiv_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    unsafe { _svdiv_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // `_n_`: scalar divisor broadcast to all lanes.
    svdiv_f32_m(pg, op1, svdup_n_f32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // `_x` may legally produce any value in inactive lanes; forwarding to the
    // merging form (which keeps `op1` there) satisfies that contract.
    svdiv_f32_m(pg, op1, op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svdiv_f32_x(pg, op1, svdup_n_f32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // `_z`: zero the inactive lanes of `op1` first so the merging divide
    // leaves zeros in those lanes of the result.
    svdiv_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svdiv_f32_z(pg, op1, svdup_n_f32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdiv.nxv2f64")]
        fn _svdiv_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svdiv_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svdiv_f64_m(pg, op1, svdup_n_f64(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    svdiv_f64_m(pg, op1, op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svdiv_f64_x(pg, op1, svdup_n_f64(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    svdiv_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svdiv_f64_z(pg, op1, svdup_n_f64(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv4i32")]
        fn _svdiv_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svdiv_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svdiv_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    svdiv_s32_m(pg, op1, op2)
}
// NOTE(review): machine-generated SVE intrinsic wrappers (stdarch). Suffix
// convention: `_m` merges inactive lanes from `op1`, `_x` leaves them
// unspecified, `_z` zeroes them; `_n_` variants splat a scalar via `svdup_n_*`.
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svdiv_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // `_z`: zero the inactive lanes of `op1` before the merging divide so the
    // result carries zeros there.
    svdiv_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svdiv_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Direct LLVM intrinsic binding; `sve_into` converts the predicate to the
    // 64-bit-element predicate type (svbool2_t) for the nxv2i64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv2i64")]
        fn _svdiv_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svdiv_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svdiv_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svdiv_s64_m(pg, op1, op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svdiv_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svdiv_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svdiv_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // The LLVM intrinsic is declared on signed vectors; `as_signed`/`as_unsigned`
    // are bit-for-bit reinterpret casts (the udiv semantics come from the
    // intrinsic itself, not the Rust-level types).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udiv.nxv4i32")]
        fn _svdiv_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svdiv_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svdiv_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    svdiv_u32_m(pg, op1, op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svdiv_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    svdiv_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svdiv_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udiv.nxv2i64")]
        fn _svdiv_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svdiv_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    svdiv_u64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    svdiv_u64_m(pg, op1, op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    svdiv_u64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    svdiv_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    svdiv_u64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // "Reversed" divide: computes op2 / op1 lane-wise (FDIVR semantics),
    // with results merged into `op1`'s inactive lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdivr.nxv4f32")]
        fn _svdivr_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    unsafe { _svdivr_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svdivr_f32_m(pg, op1, svdup_n_f32(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    svdivr_f32_m(pg, op1, op2)
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svdivr_f32_x(pg, op1, svdup_n_f32(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    svdivr_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svdivr_f32_z(pg, op1, svdup_n_f32(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdivr.nxv2f64")]
        fn _svdivr_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svdivr_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svdivr_f64_m(pg, op1, svdup_n_f64(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    svdivr_f64_m(pg, op1, op2)
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svdivr_f64_x(pg, op1, svdup_n_f64(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    svdivr_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svdivr_f64_z(pg, op1, svdup_n_f64(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdivr))]
pub fn svdivr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdivr.nxv4i32")]
        fn _svdivr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svdivr_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Divide reversed"]
// NOTE(review): machine-generated SVE intrinsic wrappers (stdarch). `svdivr`
// is the reversed divide (op2 / op1). Suffix convention: `_m` merges inactive
// lanes from `op1`, `_x` leaves them unspecified, `_z` zeroes them; `_n_`
// variants splat a scalar operand via `svdup_n_*`.
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdivr))]
pub fn svdivr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svdivr_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdivr))]
pub fn svdivr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    svdivr_s32_m(pg, op1, op2)
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdivr))]
pub fn svdivr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svdivr_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdivr))]
pub fn svdivr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // `_z`: zero the inactive lanes of `op1` before the merging op so the
    // result carries zeros there.
    svdivr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdivr))]
pub fn svdivr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svdivr_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdivr))]
pub fn svdivr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Direct LLVM intrinsic binding; `sve_into` converts the predicate to the
    // 64-bit-element predicate type (svbool2_t) for the nxv2i64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdivr.nxv2i64")]
        fn _svdivr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svdivr_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdivr))]
pub fn svdivr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svdivr_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdivr))]
pub fn svdivr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svdivr_s64_m(pg, op1, op2)
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdivr))]
pub fn svdivr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svdivr_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdivr))]
pub fn svdivr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svdivr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdivr))]
pub fn svdivr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svdivr_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udivr))]
pub fn svdivr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // The LLVM intrinsic is declared on signed vectors; `as_signed`/`as_unsigned`
    // are bit-for-bit reinterpret casts.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udivr.nxv4i32")]
        fn _svdivr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svdivr_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udivr))]
pub fn svdivr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svdivr_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udivr))]
pub fn svdivr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    svdivr_u32_m(pg, op1, op2)
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udivr))]
pub fn svdivr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svdivr_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udivr))]
pub fn svdivr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    svdivr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udivr))]
pub fn svdivr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svdivr_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udivr.nxv2i64")] + fn _svdivr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svdivr_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdivr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svdivr_u64_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdivr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svdivr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Divide reversed"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svdivr_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Dot product"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sdot, IMM_INDEX = 0))]
+pub fn svdot_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint8_t,
+    op3: svint8_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0..=3);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sdot.lane.nxv4i32"
+        )]
+        fn _svdot_lane_s32(
+            op1: svint32_t,
+            op2: svint8_t,
+            op3: svint8_t,
+            imm_index: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svdot_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Dot product"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sdot, IMM_INDEX = 0))]
+pub fn svdot_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0..=1);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sdot.lane.nxv2i64"
+        )]
+        fn _svdot_lane_s64(
+            op1: svint64_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_index: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svdot_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Dot product"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_u32])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(udot, IMM_INDEX = 0))]
+pub fn svdot_lane_u32<const IMM_INDEX: i32>(
+    op1: svuint32_t,
+    op2: svuint8_t,
+    op3: svuint8_t,
+) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0..=3);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.udot.lane.nxv4i32"
+        )]
+        fn _svdot_lane_u32(
+            op1: svint32_t,
+            op2: svint8_t,
+            op3: svint8_t,
+            imm_index: i32,
+        ) -> svint32_t;
+    }
+    unsafe {
+        _svdot_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX).as_unsigned()
+    }
+}
+#[doc = "Dot product"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_u64])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(udot, IMM_INDEX = 0))]
+pub fn svdot_lane_u64<const IMM_INDEX: i32>(
+    op1: svuint64_t,
+    op2: svuint16_t,
+    op3: svuint16_t,
+) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0..=1);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.udot.lane.nxv2i64"
+        )]
+        fn _svdot_lane_u64(
+            op1: svint64_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_index: i32,
+        ) -> svint64_t;
+    }
+    unsafe {
+        _svdot_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX).as_unsigned()
+    }
+}
+#[doc = "Dot product"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdot))] +pub fn svdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdot.nxv4i32")] + fn _svdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svdot_s32(op1, op2, op3) } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdot))] +pub fn svdot_n_s32(op1: svint32_t, op2: svint8_t, op3: i8) -> svint32_t { + svdot_s32(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdot))] +pub fn svdot_s64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdot.nxv2i64")] + fn _svdot_s64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t; + } + unsafe { _svdot_s64(op1, op2, op3) } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdot))] +pub fn svdot_n_s64(op1: svint64_t, op2: svint16_t, op3: i16) -> svint64_t { + svdot_s64(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Dot product"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udot))] +pub fn svdot_u32(op1: svuint32_t, op2: svuint8_t, op3: svuint8_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udot.nxv4i32")] + fn _svdot_u32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svdot_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udot))] +pub fn svdot_n_u32(op1: svuint32_t, op2: svuint8_t, op3: u8) -> svuint32_t { + svdot_u32(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udot))] +pub fn svdot_u64(op1: svuint64_t, op2: svuint16_t, op3: svuint16_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udot.nxv2i64")] + fn _svdot_u64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t; + } + unsafe { _svdot_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(udot))] +pub fn svdot_n_u64(op1: svuint64_t, op2: svuint16_t, op3: u16) -> svuint64_t { + svdot_u64(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_f32(data: svfloat32_t, index: u32) -> svfloat32_t { + svtbl_f32(data, svdup_n_u32(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_f64(data: svfloat64_t, index: u64) -> svfloat64_t { + svtbl_f64(data, svdup_n_u64(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s8(data: svint8_t, index: u8) -> svint8_t { + svtbl_s8(data, svdup_n_u8(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s16(data: svint16_t, index: u16) -> svint16_t { + svtbl_s16(data, svdup_n_u16(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s32])"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s32(data: svint32_t, index: u32) -> svint32_t { + svtbl_s32(data, svdup_n_u32(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s64(data: svint64_t, index: u64) -> svint64_t { + svtbl_s64(data, svdup_n_u64(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u8(data: svuint8_t, index: u8) -> svuint8_t { + svtbl_u8(data, svdup_n_u8(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u16(data: svuint16_t, index: u16) -> svuint16_t { + svtbl_u16(data, svdup_n_u16(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u32(data: svuint32_t, index: u32) -> svuint32_t { + svtbl_u32(data, svdup_n_u32(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u64(data: svuint64_t, index: u64) -> svuint64_t { + svtbl_u64(data, svdup_n_u64(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b8(op: bool) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i1")] + fn _svdup_n_b8(op: bool) -> svbool_t; + } + unsafe { _svdup_n_b8(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b16(op: bool) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i1")] + fn _svdup_n_b16(op: bool) -> svbool8_t; + } + unsafe { _svdup_n_b16(op).sve_into() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b32(op: bool) -> svbool_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i1")] + fn _svdup_n_b32(op: bool) -> svbool4_t; + } + unsafe { _svdup_n_b32(op).sve_into() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b64(op: bool) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i1")] + fn _svdup_n_b64(op: bool) -> svbool2_t; + } + unsafe { _svdup_n_b64(op).sve_into() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32(op: f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4f32")] + fn _svdup_n_f32(op: f32) -> svfloat32_t; + } + unsafe { _svdup_n_f32(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64(op: f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2f64")] + fn _svdup_n_f64(op: f64) -> svfloat64_t; + } + unsafe { _svdup_n_f64(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8(op: i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i8")] + fn _svdup_n_s8(op: i8) -> svint8_t; + } + unsafe { _svdup_n_s8(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16(op: i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i16")] + fn _svdup_n_s16(op: i16) -> svint16_t; + } + unsafe { _svdup_n_s16(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32(op: i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")] + fn _svdup_n_s32(op: i32) -> svint32_t; + } + unsafe { _svdup_n_s32(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64(op: i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i64")] + fn _svdup_n_s64(op: i64) -> svint64_t; + } + unsafe { _svdup_n_s64(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8(op: u8) -> svuint8_t { + unsafe { svdup_n_s8(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16(op: u16) -> svuint16_t { + unsafe { svdup_n_s16(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32(op: u32) -> svuint32_t { + unsafe { svdup_n_s32(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64(op: u64) -> svuint64_t { + unsafe { svdup_n_s64(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32_m(inactive: svfloat32_t, pg: svbool_t, op: f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv4f32")] + fn _svdup_n_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: f32) -> svfloat32_t; + } + unsafe { _svdup_n_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32_x(pg: svbool_t, op: f32) -> svfloat32_t { + svdup_n_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32_z(pg: svbool_t, op: f32) -> svfloat32_t { + svdup_n_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64_m(inactive: svfloat64_t, pg: svbool_t, op: f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv2f64")] + fn _svdup_n_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: f64) -> svfloat64_t; + } + unsafe { _svdup_n_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64_x(pg: svbool_t, op: f64) -> svfloat64_t { + svdup_n_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64_z(pg: svbool_t, op: f64) -> svfloat64_t { + svdup_n_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8_m(inactive: svint8_t, pg: svbool_t, op: i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv16i8")] + fn _svdup_n_s8_m(inactive: svint8_t, pg: svbool_t, op: i8) -> svint8_t; + } + unsafe { _svdup_n_s8_m(inactive, pg, op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8_x(pg: svbool_t, op: i8) -> svint8_t { + svdup_n_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8_z(pg: svbool_t, op: i8) -> svint8_t { + svdup_n_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16_m(inactive: svint16_t, pg: svbool_t, op: i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv8i16")] + fn _svdup_n_s16_m(inactive: svint16_t, pg: svbool8_t, op: i16) -> svint16_t; + } + unsafe { _svdup_n_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16_x(pg: svbool_t, op: i16) -> svint16_t { + svdup_n_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16_z(pg: svbool_t, op: i16) -> svint16_t { + svdup_n_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32_m(inactive: svint32_t, pg: svbool_t, op: i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv4i32")] + fn _svdup_n_s32_m(inactive: svint32_t, pg: svbool4_t, op: i32) -> svint32_t; + } + unsafe { _svdup_n_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32_x(pg: svbool_t, op: i32) -> svint32_t { + svdup_n_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32_z(pg: svbool_t, op: i32) -> svint32_t { + svdup_n_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64_m(inactive: svint64_t, pg: svbool_t, op: i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv2i64")] + fn _svdup_n_s64_m(inactive: svint64_t, pg: svbool2_t, op: i64) -> svint64_t; + } + unsafe { _svdup_n_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64_x(pg: svbool_t, op: i64) -> svint64_t { + svdup_n_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64_z(pg: svbool_t, op: i64) -> svint64_t { + svdup_n_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8_m(inactive: svuint8_t, pg: svbool_t, op: u8) -> svuint8_t { + unsafe { svdup_n_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8_x(pg: svbool_t, op: u8) -> svuint8_t { + svdup_n_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8_z(pg: 
svbool_t, op: u8) -> svuint8_t { + svdup_n_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16_m(inactive: svuint16_t, pg: svbool_t, op: u16) -> svuint16_t { + unsafe { svdup_n_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16_x(pg: svbool_t, op: u16) -> svuint16_t { + svdup_n_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16_z(pg: svbool_t, op: u16) -> svuint16_t { + svdup_n_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32_m(inactive: svuint32_t, pg: svbool_t, op: u32) -> svuint32_t { + unsafe { svdup_n_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32_x(pg: svbool_t, op: u32) -> svuint32_t { + svdup_n_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32_z(pg: svbool_t, op: u32) -> svuint32_t { + svdup_n_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64_m(inactive: svuint64_t, pg: svbool_t, op: u64) -> svuint64_t { + unsafe { svdup_n_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64_x(pg: svbool_t, op: u64) -> svuint64_t { + svdup_n_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn 
svdup_n_u64_z(pg: svbool_t, op: u64) -> svuint64_t { + svdup_n_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_f32(data: svfloat32_t, index: u64) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv4f32" + )] + fn _svdupq_lane_f32(data: svfloat32_t, index: i64) -> svfloat32_t; + } + unsafe { _svdupq_lane_f32(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_f64(data: svfloat64_t, index: u64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv2f64" + )] + fn _svdupq_lane_f64(data: svfloat64_t, index: i64) -> svfloat64_t; + } + unsafe { _svdupq_lane_f64(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s8(data: svint8_t, index: u64) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv16i8" + )] + fn _svdupq_lane_s8(data: svint8_t, index: i64) -> svint8_t; + } + unsafe { 
_svdupq_lane_s8(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s16(data: svint16_t, index: u64) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv8i16" + )] + fn _svdupq_lane_s16(data: svint16_t, index: i64) -> svint16_t; + } + unsafe { _svdupq_lane_s16(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s32(data: svint32_t, index: u64) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv4i32" + )] + fn _svdupq_lane_s32(data: svint32_t, index: i64) -> svint32_t; + } + unsafe { _svdupq_lane_s32(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s64(data: svint64_t, index: u64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv2i64" + )] + fn _svdupq_lane_s64(data: svint64_t, index: i64) -> svint64_t; + } + unsafe { _svdupq_lane_s64(data, index.as_signed()) } +} +#[doc = "Broadcast a 
quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u8(data: svuint8_t, index: u64) -> svuint8_t { + unsafe { svdupq_lane_s8(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u16(data: svuint16_t, index: u64) -> svuint16_t { + unsafe { svdupq_lane_s16(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u32(data: svuint32_t, index: u64) -> svuint32_t { + unsafe { svdupq_lane_s32(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u64(data: svuint64_t, index: u64) -> svuint64_t { + unsafe { svdupq_lane_s64(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b16)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_b16( + x0: bool, + x1: bool, + x2: bool, + x3: bool, + x4: bool, + x5: bool, + x6: bool, + x7: bool, +) -> svbool_t { + let op1 = svdupq_n_s16( + x0 as i16, x1 as i16, x2 as i16, x3 as i16, x4 as i16, x5 as i16, x6 as i16, x7 as i16, + ); + svcmpne_wide_s16(svptrue_b16(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_b32(x0: bool, x1: bool, x2: bool, x3: bool) -> svbool_t { + let op1 = svdupq_n_s32(x0 as i32, x1 as i32, x2 as i32, x3 as i32); + svcmpne_wide_s32(svptrue_b32(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_b64(x0: bool, x1: bool) -> svbool_t { + let op1 = svdupq_n_s64(x0 as i64, x1 as i64); + svcmpne_s64(svptrue_b64(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_b8( + x0: bool, + x1: bool, + x2: bool, + x3: bool, + x4: bool, + x5: bool, + x6: bool, + x7: bool, + x8: bool, + x9: bool, + x10: bool, + x11: bool, + x12: bool, + x13: bool, + x14: bool, + x15: bool, +) -> svbool_t { + let op1 = svdupq_n_s8( + x0 as i8, x1 as i8, x2 as i8, x3 as i8, x4 as i8, x5 as i8, x6 as i8, x7 as i8, x8 as i8, + x9 as i8, x10 
as i8, x11 as i8, x12 as i8, x13 as i8, x14 as i8, x15 as i8, + ); + svcmpne_wide_s8(svptrue_b8(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_f32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_f32(x0: f32, x1: f32, x2: f32, x3: f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv4f32.v4f32" + )] + fn _svdupq_n_f32(op0: svfloat32_t, op1: float32x4_t, idx: i64) -> svfloat32_t; + } + unsafe { + let op = _svdupq_n_f32(svundef_f32(), crate::mem::transmute([x0, x1, x2, x3]), 0); + svdupq_lane_f32(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_s32(x0: i32, x1: i32, x2: i32, x3: i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv4i32.v4i32" + )] + fn _svdupq_n_s32(op0: svint32_t, op1: int32x4_t, idx: i64) -> svint32_t; + } + unsafe { + let op = _svdupq_n_s32(svundef_s32(), crate::mem::transmute([x0, x1, x2, x3]), 0); + svdupq_lane_s32(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_u32(x0: u32, x1: u32, x2: u32, x3: u32) -> svuint32_t { + unsafe { + svdupq_n_s32( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + ) + .as_unsigned() 
+ } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_f64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_f64(x0: f64, x1: f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv2f64.v2f64" + )] + fn _svdupq_n_f64(op0: svfloat64_t, op1: float64x2_t, idx: i64) -> svfloat64_t; + } + unsafe { + let op = _svdupq_n_f64(svundef_f64(), crate::mem::transmute([x0, x1]), 0); + svdupq_lane_f64(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_s64(x0: i64, x1: i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv2i64.v2i64" + )] + fn _svdupq_n_s64(op0: svint64_t, op1: int64x2_t, idx: i64) -> svint64_t; + } + unsafe { + let op = _svdupq_n_s64(svundef_s64(), crate::mem::transmute([x0, x1]), 0); + svdupq_lane_s64(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_u64(x0: u64, x1: u64) -> svuint64_t { + unsafe { svdupq_n_s64(x0.as_signed(), x1.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s16)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_s16( + x0: i16, + x1: i16, + x2: i16, + x3: i16, + x4: i16, + x5: i16, + x6: i16, + x7: i16, +) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv8i16.v8i16" + )] + fn _svdupq_n_s16(op0: svint16_t, op1: int16x8_t, idx: i64) -> svint16_t; + } + unsafe { + let op = _svdupq_n_s16( + svundef_s16(), + crate::mem::transmute([x0, x1, x2, x3, x4, x5, x6, x7]), + 0, + ); + svdupq_lane_s16(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_u16( + x0: u16, + x1: u16, + x2: u16, + x3: u16, + x4: u16, + x5: u16, + x6: u16, + x7: u16, +) -> svuint16_t { + unsafe { + svdupq_n_s16( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + x4.as_signed(), + x5.as_signed(), + x6.as_signed(), + x7.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_s8( + x0: i8, + x1: i8, + x2: i8, + x3: i8, + x4: i8, + x5: i8, + x6: i8, + x7: i8, + x8: i8, + x9: i8, + x10: i8, + x11: i8, + x12: i8, + x13: i8, + x14: i8, + x15: i8, +) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv16i8.v16i8" + )] + fn _svdupq_n_s8(op0: svint8_t, op1: int8x16_t, idx: i64) -> svint8_t; + } + unsafe { + let op = _svdupq_n_s8( + svundef_s8(), + crate::mem::transmute([ + x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, 
x11, x12, x13, x14, x15, + ]), + 0, + ); + svdupq_lane_s8(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_u8( + x0: u8, + x1: u8, + x2: u8, + x3: u8, + x4: u8, + x5: u8, + x6: u8, + x7: u8, + x8: u8, + x9: u8, + x10: u8, + x11: u8, + x12: u8, + x13: u8, + x14: u8, + x15: u8, +) -> svuint8_t { + unsafe { + svdupq_n_s8( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + x4.as_signed(), + x5.as_signed(), + x6.as_signed(), + x7.as_signed(), + x8.as_signed(), + x9.as_signed(), + x10.as_signed(), + x11.as_signed(), + x12.as_signed(), + x13.as_signed(), + x14.as_signed(), + x15.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.z.nxv16i1")] + fn _sveor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _sveor_b_z(pg, op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.eor.nxv16i8")] + fn _sveor_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _sveor_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + sveor_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + sveor_s8_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + sveor_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + sveor_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + sveor_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv8i16")] + fn _sveor_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _sveor_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + sveor_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + sveor_s16_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + sveor_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + sveor_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + sveor_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv4i32")] + fn _sveor_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _sveor_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + sveor_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + sveor_s32_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + sveor_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + sveor_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn 
sveor_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + sveor_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv2i64")] + fn _sveor_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _sveor_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + sveor_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + sveor_s64_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + 
sveor_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + sveor_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + sveor_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { sveor_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveor_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_x)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + sveor_u8_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveor_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + sveor_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveor_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { sveor_s16_m(pg, op1.as_signed(), 
op2.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
pub fn sveor_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    sveor_u16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
pub fn sveor_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    sveor_u16_m(pg, op1, op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
pub fn sveor_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    sveor_u16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
pub fn sveor_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    sveor_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
pub fn sveor_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    sveor_u16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
pub fn sveor_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe { sveor_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
pub fn sveor_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    sveor_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
pub fn sveor_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    sveor_u32_m(pg, op1, op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
pub fn sveor_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    sveor_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
pub fn sveor_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    sveor_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
pub fn sveor_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    sveor_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
pub fn sveor_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe { sveor_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
pub fn sveor_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    sveor_u64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
pub fn sveor_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    sveor_u64_m(pg, op1, op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
pub fn sveor_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    sveor_u64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
pub fn sveor_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    sveor_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
pub fn sveor_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    sveor_u64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Bitwise exclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorv))]
pub fn sveorv_s8(pg: svbool_t, op: svint8_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv16i8")]
        fn _sveorv_s8(pg: svbool_t, op: svint8_t) -> i8;
    }
    unsafe { _sveorv_s8(pg, op) }
}
#[doc = "Bitwise exclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorv))]
pub fn sveorv_s16(pg: svbool_t, op: svint16_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv8i16")]
        fn _sveorv_s16(pg: svbool8_t, op: svint16_t) -> i16;
    }
    unsafe { _sveorv_s16(pg.sve_into(), op) }
}
#[doc = "Bitwise exclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorv))]
pub fn sveorv_s32(pg: svbool_t, op: svint32_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv4i32")]
        fn _sveorv_s32(pg: svbool4_t, op: svint32_t) -> i32;
    }
    unsafe { _sveorv_s32(pg.sve_into(), op) }
}
#[doc = "Bitwise exclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorv))]
pub fn sveorv_s64(pg: svbool_t, op: svint64_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv2i64")]
        fn _sveorv_s64(pg: svbool2_t, op: svint64_t) -> i64;
    }
    unsafe { _sveorv_s64(pg.sve_into(), op) }
}
#[doc = "Bitwise exclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorv))]
pub fn sveorv_u8(pg: svbool_t, op: svuint8_t) -> u8 {
    unsafe { sveorv_s8(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorv))]
pub fn sveorv_u16(pg: svbool_t, op: svuint16_t) -> u16 {
    unsafe { sveorv_s16(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorv))]
pub fn sveorv_u32(pg: svbool_t, op: svuint32_t) -> u32 {
    unsafe { sveorv_s32(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorv))]
pub fn sveorv_u64(pg: svbool_t, op: svuint64_t) -> u64 {
    unsafe { sveorv_s64(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Floating-point exponential accelerator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexpa[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fexpa))]
pub fn svexpa_f32(op: svuint32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        // NOTE(review): removed a trailing space inside link_name — with the
        // space the declaration would bind to a nonexistent LLVM symbol.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fexpa.x.nxv4f32")]
        fn _svexpa_f32(op: svint32_t) -> svfloat32_t;
    }
    unsafe { _svexpa_f32(op.as_signed()) }
}
#[doc = "Floating-point exponential accelerator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexpa[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fexpa))]
pub fn svexpa_f64(op: svuint64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        // NOTE(review): trailing space removed from link_name (same fix as f32).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fexpa.x.nxv2f64")]
        fn _svexpa_f64(op: svint64_t) -> svfloat64_t;
    }
    unsafe { _svexpa_f64(op.as_signed()) }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
// NOTE(review): restored `<const IMM3: i32>` on the svext_* family — the
// parameter list had been stripped by text mangling, leaving IMM3 unresolved.
pub fn svext_f32<const IMM3: i32>(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    static_assert_range!(IMM3, 0..=63);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv4f32")]
        fn _svext_f32(op1: svfloat32_t, op2: svfloat32_t, imm3: i32) -> svfloat32_t;
    }
    unsafe { _svext_f32(op1, op2, IMM3) }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
pub fn svext_f64<const IMM3: i32>(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    static_assert_range!(IMM3, 0..=31);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv2f64")]
        fn _svext_f64(op1: svfloat64_t, op2: svfloat64_t, imm3: i32) -> svfloat64_t;
    }
    unsafe { _svext_f64(op1, op2, IMM3) }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
pub fn svext_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
    static_assert_range!(IMM3, 0..=255);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv16i8")]
        fn _svext_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
    }
    unsafe { _svext_s8(op1, op2, IMM3) }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
pub fn svext_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
    static_assert_range!(IMM3, 0..=127);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv8i16")]
        fn _svext_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
    }
    unsafe { _svext_s16(op1, op2, IMM3) }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
pub fn svext_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
    static_assert_range!(IMM3, 0..=63);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv4i32")]
        fn _svext_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
    }
    unsafe { _svext_s32(op1, op2, IMM3) }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
pub fn svext_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
    static_assert_range!(IMM3, 0..=31);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv2i64")]
        fn _svext_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
    }
    unsafe { _svext_s64(op1, op2, IMM3) }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
// NOTE(review): the empty turbofish `svext_s8::(…)` was invalid — restored
// `::<IMM3>` (and likewise for the u16/u32/u64 forwards below).
pub fn svext_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    static_assert_range!(IMM3, 0..=255);
    unsafe { svext_s8::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
pub fn svext_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    static_assert_range!(IMM3, 0..=127);
    unsafe { svext_s16::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
pub fn svext_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    static_assert_range!(IMM3, 0..=63);
    unsafe { svext_s32::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
pub fn svext_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    static_assert_range!(IMM3, 0..=31);
    unsafe { svext_s64::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv8i16")]
        fn _svextb_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    unsafe { _svextb_s16_m(inactive, pg.sve_into(), op) }
}
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s16_x(pg: svbool_t, op:
svint16_t) -> svint16_t { + svextb_s16_m(op, pg, op) +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svextb_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv4i32")] + fn _svextb_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svextb_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svextb_s32_m(op, pg, op) +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svextb_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Sign-extend the low 
16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxth.nxv4i32")] + fn _svexth_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svexth_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svexth_s32_m(op, pg, op) +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svexth_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv2i64")] + fn 
_svextb_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svextb_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svextb_s64_m(op, pg, op) +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svextb_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxth.nxv2i64")] + fn _svexth_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svexth_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s64_x(pg: svbool_t, op: svint64_t) 
-> svint64_t { + svexth_s64_m(op, pg, op) +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svexth_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Sign-extend the low 32 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtw))] +pub fn svextw_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtw.nxv2i64")] + fn _svextw_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svextw_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Sign-extend the low 32 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtw))] +pub fn svextw_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svextw_s64_m(op, pg, op) +} +#[doc = "Sign-extend the low 32 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtw))] +pub fn svextw_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svextw_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Zero-extend the low 8 
bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv8i16")] + fn _svextb_u16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svextb_u16_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svextb_u16_m(op, pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svextb_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.uxtb.nxv4i32")] + fn _svextb_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svextb_u32_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svextb_u32_m(op, pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svextb_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxth.nxv4i32")] + fn _svexth_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svexth_u32_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svexth_u32_m(op, pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svexth_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv2i64")] + fn _svextb_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svextb_u64_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svextb_u64_m(op, pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svextb_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxth.nxv2i64")] + fn _svexth_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svexth_u64_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svexth_u64_m(op, pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svexth_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Zero-extend the low 32 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(uxtw))] +pub fn svextw_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtw.nxv2i64")] + fn _svextw_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svextw_u64_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 32 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtw))] +pub fn svextw_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svextw_u64_m(op, pg, op) +} +#[doc = "Zero-extend the low 32 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtw))] +pub fn svextw_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svextw_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_f32(tuple: svfloat32x2_t) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_f64<const IMM_INDEX: i32>(tuple: svfloat64x2_t) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_s8<const IMM_INDEX: i32>(tuple: svint8x2_t) -> svint8_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_s16<const IMM_INDEX: i32>(tuple: svint16x2_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_s32<const IMM_INDEX: i32>(tuple: svint32x2_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn 
svget2_s64<const IMM_INDEX: i32>(tuple: svint64x2_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_u8<const IMM_INDEX: i32>(tuple: svuint8x2_t) -> svuint8_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_u16<const IMM_INDEX: i32>(tuple: svuint16x2_t) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_u32<const IMM_INDEX: i32>(tuple: svuint32x2_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_u64<const IMM_INDEX: i32>(tuple: svuint64x2_t) -> svuint64_t { + 
static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_f32<const IMM_INDEX: i32>(tuple: svfloat32x3_t) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_f64<const IMM_INDEX: i32>(tuple: svfloat64x3_t) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_s8<const IMM_INDEX: i32>(tuple: svint8x3_t) -> svint8_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_s16<const IMM_INDEX: i32>(tuple: svint16x3_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { 
crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_s32<const IMM_INDEX: i32>(tuple: svint32x3_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_s64<const IMM_INDEX: i32>(tuple: svint64x3_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_u8<const IMM_INDEX: i32>(tuple: svuint8x3_t) -> svuint8_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_u16<const IMM_INDEX: i32>(tuple: svuint16x3_t) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, 
_, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_u32<const IMM_INDEX: i32>(tuple: svuint32x3_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_u64<const IMM_INDEX: i32>(tuple: svuint64x3_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_f32<const IMM_INDEX: i32>(tuple: svfloat32x4_t) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_f64<const IMM_INDEX: i32>(tuple: svfloat64x4_t) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = 
"Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_s8<const IMM_INDEX: i32>(tuple: svint8x4_t) -> svint8_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_s16<const IMM_INDEX: i32>(tuple: svint16x4_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_s32<const IMM_INDEX: i32>(tuple: svint32x4_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_s64<const IMM_INDEX: i32>(tuple: svint64x4_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_u8<const IMM_INDEX: i32>(tuple: svuint8x4_t) -> svuint8_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_u16<const IMM_INDEX: i32>(tuple: svuint16x4_t) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_u32<const IMM_INDEX: i32>(tuple: svuint32x4_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_u64<const IMM_INDEX: i32>(tuple: svuint64x4_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Create linear series"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_s8(base: i8, step: i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv16i8")] + fn _svindex_s8(base: i8, step: i8) -> svint8_t; + } + unsafe { _svindex_s8(base, step) } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_s16(base: i16, step: i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv8i16")] + fn _svindex_s16(base: i16, step: i16) -> svint16_t; + } + unsafe { _svindex_s16(base, step) } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_s32(base: i32, step: i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv4i32")] + fn _svindex_s32(base: i32, step: i32) -> svint32_t; + } + unsafe { _svindex_s32(base, step) } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn 
svindex_s64(base: i64, step: i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv2i64")] + fn _svindex_s64(base: i64, step: i64) -> svint64_t; + } + unsafe { _svindex_s64(base, step) } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u8(base: u8, step: u8) -> svuint8_t { + unsafe { svindex_s8(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u16(base: u16, step: u16) -> svuint16_t { + unsafe { svindex_s16(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u32(base: u32, step: u32) -> svuint32_t { + unsafe { svindex_s32(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u64(base: u64, step: u64) -> svuint64_t { + unsafe { svindex_s64(base.as_signed(), 
step.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_f32(op1: svfloat32_t, op2: f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv4f32")] + fn _svinsr_n_f32(op1: svfloat32_t, op2: f32) -> svfloat32_t; + } + unsafe { _svinsr_n_f32(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_f64(op1: svfloat64_t, op2: f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv2f64")] + fn _svinsr_n_f64(op1: svfloat64_t, op2: f64) -> svfloat64_t; + } + unsafe { _svinsr_n_f64(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv16i8")] + fn _svinsr_n_s8(op1: svint8_t, op2: i8) -> svint8_t; + } + unsafe { _svinsr_n_s8(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv8i16")] + fn _svinsr_n_s16(op1: svint16_t, op2: i16) -> svint16_t; + } + unsafe { _svinsr_n_s16(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv4i32")] + fn _svinsr_n_s32(op1: svint32_t, op2: i32) -> svint32_t; + } + unsafe { _svinsr_n_s32(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv2i64")] + fn _svinsr_n_s64(op1: svint64_t, op2: i64) -> svint64_t; + } + unsafe { _svinsr_n_s64(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + unsafe { svinsr_n_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + unsafe { svinsr_n_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + unsafe { svinsr_n_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + unsafe { svinsr_n_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4f32")] + fn _svlasta_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svlasta_f32(pg.sve_into(), op) } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2f64")] + fn _svlasta_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svlasta_f64(pg.sve_into(), op) } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv16i8")] + fn _svlasta_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svlasta_s8(pg, op) } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv8i16")] + fn _svlasta_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svlasta_s16(pg.sve_into(), op) } +} +#[doc = "Extract element after 
last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4i32")] + fn _svlasta_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svlasta_s32(pg.sve_into(), op) } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2i64")] + fn _svlasta_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svlasta_s64(pg.sve_into(), op) } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svlasta_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svlasta_s16(pg, 
op.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svlasta_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svlasta_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4f32")] + fn _svlastb_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svlastb_f32(pg.sve_into(), op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2f64")] + fn 
_svlastb_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svlastb_f64(pg.sve_into(), op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv16i8")] + fn _svlastb_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svlastb_s8(pg, op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv8i16")] + fn _svlastb_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svlastb_s16(pg.sve_into(), op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4i32")] + fn _svlastb_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svlastb_s32(pg.sve_into(), op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2i64")] + fn _svlastb_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svlastb_s64(pg.sve_into(), op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svlastb_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svlastb_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svlastb_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u64])"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svlastb_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4f32")] + fn _svld1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1_f32(pg.sve_into(), base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2f64")] + fn _svld1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; 
+ } + _svld1_f64(pg.sve_into(), base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv16i8")] + fn _svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svld1_s8(pg, base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i16")] + fn _svld1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svld1_s16(pg.sve_into(), base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s32])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i32")] + fn _svld1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svld1_s32(pg.sve_into(), base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i64")] + fn _svld1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1_s64(pg.sve_into(), base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each 
active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must 
be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32index_f32( + pg: svbool_t, + base: *const f32, + indices: svint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4f32" + )] + fn _svld1_gather_s32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_s32index_f32(pg.sve_into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32index_s32( + pg: svbool_t, + base: *const i32, + indices: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32" + )] + fn _svld1_gather_s32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svld1_gather_s32index_s32(pg.sve_into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32index_u32( + pg: svbool_t, + base: *const u32, + indices: svint32_t, +) -> svuint32_t { + svld1_gather_s32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64index_f64( + pg: svbool_t, + base: *const f64, + indices: svint64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2f64" + )] + fn _svld1_gather_s64index_f64( + pg: svbool2_t, + base: *const f64, + indices: svint64_t, + ) -> svfloat64_t; + } + _svld1_gather_s64index_f64(pg.sve_into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64index_s64( + pg: svbool_t, + base: *const i64, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i64" + )] + fn _svld1_gather_s64index_s64( + pg: svbool2_t, + base: *const i64, + indices: svint64_t, + ) -> svint64_t; + } + _svld1_gather_s64index_s64(pg.sve_into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64index_u64( + pg: svbool_t, + base: *const u64, + indices: svint64_t, +) -> svuint64_t { + svld1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32index_f32( + pg: svbool_t, + base: *const f32, + indices: svuint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4f32" + )] + fn _svld1_gather_u32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_u32index_f32(pg.sve_into(), base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn 
svld1_gather_u32index_s32( + pg: svbool_t, + base: *const i32, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32" + )] + fn _svld1_gather_u32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svld1_gather_u32index_s32(pg.sve_into(), base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32index_u32( + pg: svbool_t, + base: *const u32, + indices: svuint32_t, +) -> svuint32_t { + svld1_gather_u32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64index_f64( + pg: svbool_t, + base: *const f64, + indices: svuint64_t, +) -> svfloat64_t { + 
svld1_gather_s64index_f64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64index_s64( + pg: svbool_t, + base: *const i64, + indices: svuint64_t, +) -> svint64_t { + svld1_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64index_u64( + pg: svbool_t, + base: *const u64, + indices: svuint64_t, +) -> svuint64_t { + svld1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element 
(governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32" + )] + fn _svld1_gather_s32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_s32offset_f32(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32" + )] + fn _svld1_gather_s32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svld1_gather_s32offset_s32(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svint32_t, +) -> svuint32_t { + svld1_gather_s32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svint64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2f64" + )] + fn _svld1_gather_s64offset_f64( + pg: svbool2_t, + base: *const f64, + offsets: svint64_t, + ) -> svfloat64_t; + } + _svld1_gather_s64offset_f64(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by 
`pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i64" + )] + fn _svld1_gather_s64offset_s64( + pg: svbool2_t, + base: *const i64, + offsets: svint64_t, + ) -> svint64_t; + } + _svld1_gather_s64offset_s64(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svint64_t, +) -> svuint64_t { + svld1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svuint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32" + )] + fn _svld1_gather_u32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_u32offset_f32(pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32" + )] + fn _svld1_gather_u32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svld1_gather_u32offset_s32(pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = 
" * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svuint32_t, +) -> svuint32_t { + svld1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svuint64_t, +) -> svfloat64_t { + svld1_gather_s64offset_f64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svuint64_t, +) -> svint64_t { + 
svld1_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svuint64_t, +) -> svuint64_t { + svld1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t { + svld1_gather_u32base_offset_f32(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_s32)"] +#[doc = "## 
Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for 
each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t { + svld1_gather_u64base_offset_f64(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active 
element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svfloat32_t { + svld1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in 
`bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svld1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svld1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack 
provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svfloat64_t { + svld1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this 
is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svld1_gather_u32base_offset_f32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svfloat32_t; + } + _svld1_gather_u32base_offset_f32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must 
be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svld1_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svint32_t; + } + _svld1_gather_u32base_offset_s32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svld1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = 
"Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svld1_gather_u64base_offset_f64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svfloat64_t; + } + _svld1_gather_u64base_offset_f64(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svld1_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svint64_t; + } + _svld1_gather_u64base_offset_s64(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svld1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svld1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svld1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svld1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svld1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svld1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svld1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svld1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svld1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svld1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ld1row))] +pub unsafe fn svld1ro_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv4f32")] + fn _svld1ro_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1ro_f32(pg.sve_into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rod))] +pub unsafe fn svld1ro_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv2f64")] + fn _svld1ro_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svld1ro_f64(pg.sve_into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rob))] +pub unsafe fn svld1ro_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv16i8")] + fn _svld1ro_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svld1ro_s8(pg, base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1roh))] +pub unsafe fn svld1ro_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv8i16")] + fn _svld1ro_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svld1ro_s16(pg.sve_into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1row))] +pub unsafe fn svld1ro_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv4i32")] + fn _svld1ro_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + 
_svld1ro_s32(pg.sve_into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rod))] +pub unsafe fn svld1ro_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv2i64")] + fn _svld1ro_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1ro_s64(pg.sve_into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rob))] +pub unsafe fn svld1ro_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1ro_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1roh))] +pub unsafe fn svld1ro_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1ro_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1row))] +pub unsafe fn svld1ro_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1ro_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rod))] +pub unsafe fn svld1ro_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1ro_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and 
replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqw))] +pub unsafe fn svld1rq_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv4f32")] + fn _svld1rq_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1rq_f32(pg.sve_into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqd))] +pub unsafe fn svld1rq_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv2f64")] + fn _svld1rq_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svld1rq_f64(pg.sve_into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s8])"] +#[doc = 
"## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqb))] +pub unsafe fn svld1rq_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv16i8")] + fn _svld1rq_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svld1rq_s8(pg, base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqh))] +pub unsafe fn svld1rq_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv8i16")] + fn _svld1rq_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svld1rq_s16(pg.sve_into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = 
" * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqw))] +pub unsafe fn svld1rq_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv4i32")] + fn _svld1rq_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svld1rq_s32(pg.sve_into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqd))] +pub unsafe fn svld1rq_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv2i64")] + fn _svld1rq_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1rq_s64(pg.sve_into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqb))] +pub unsafe fn svld1rq_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1rq_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqh))] +pub unsafe fn svld1rq_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1rq_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqw))] +pub unsafe fn svld1rq_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1rq_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met 
for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqd))] +pub unsafe fn svld1rq_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1rq_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8" + )] + fn _svld1sb_gather_s32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svld1sb_gather_s32offset_s32(pg.sve_into(), base, offsets)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16" + )] + fn _svld1sh_gather_s32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_gather_s32offset_s32(pg.sve_into(), base, offsets)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svuint32_t { + svld1sb_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svuint32_t { + svld1sh_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i8" + )] + fn _svld1sb_gather_s64offset_s64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast(_svld1sb_gather_s64offset_s64(pg.sve_into(), base, offsets)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16" + )] + fn _svld1sh_gather_s64offset_s64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_gather_s64offset_s64(pg.sve_into(), base, offsets)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i32" + )] + fn _svld1sw_gather_s64offset_s64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svld1sw_gather_s64offset_s64(pg.sve_into(), base, offsets)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * 
This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svuint64_t { + svld1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svuint64_t { + svld1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: 
svint64_t, +) -> svuint64_t { + svld1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8" + )] + fn _svld1sb_gather_u32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svld1sb_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern 
"unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16" + )] + fn _svld1sh_gather_u32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svuint32_t { + svld1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svuint32_t { + svld1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned() 
+} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svint64_t { + svld1sb_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svint64_t { + svld1sh_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by 
`pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svint64_t { + svld1sw_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svuint64_t { + svld1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64offset_u64( + pg: svbool_t, + 
base: *const i16, + offsets: svuint64_t, +) -> svuint64_t { + svld1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svuint64_t { + svld1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name 
= "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svld1sb_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svld1sb_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svld1sh_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by 
`pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svld1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svld1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for 
each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svld1sb_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast(_svld1sb_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svld1sh_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svld1sw_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svld1sw_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety 
constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1sb_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the 
address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1sh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1sb_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This 
dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1sh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1sb_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed 
by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1sh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1sw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is 
similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1sb_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1sh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or 
[`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1sw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i8")] + fn _svld1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::simd_cast(_svld1sb_s16(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t { 
+ unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i8")] + fn _svld1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svld1sb_s32(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i16")] + fn _svld1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_s32(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i8")] + 
fn _svld1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast(_svld1sb_s64(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i16")] + fn _svld1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_s64(pg.sve_into(), base)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i32")] + fn _svld1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + 
crate::intrinsics::simd::simd_cast(_svld1sw_s64(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { + svld1sb_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t { + svld1sb_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { + svld1sh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { + svld1sb_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { + svld1sh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be 
met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { + svld1sw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t { + svld1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t { + svld1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t { + svld1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t { + svld1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t { + svld1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t { + svld1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t { + svld1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t { + svld1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t { + svld1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t { + svld1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t { + svld1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t { + svld1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32index_s32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16" + )] + fn _svld1sh_gather_s32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_gather_s32index_s32(pg.sve_into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by 
`pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32index_u32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svuint32_t { + svld1sh_gather_s32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64index_s64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i16" + )] + fn _svld1sh_gather_s64index_s64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_gather_s64index_s64(pg.sve_into(), base, indices)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for 
each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64index_s64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i32" + )] + fn _svld1sw_gather_s64index_s64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svld1sw_gather_s64index_s64(pg.sve_into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64index_u64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svuint64_t { + svld1sh_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64index_u64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svuint64_t { + svld1sw_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32index_s32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16" + )] + fn _svld1sh_gather_u32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_gather_u32index_s32( + pg.sve_into(), + base, + indices.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32index_u32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svuint32_t { + svld1sh_gather_u32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64index_s64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svint64_t { + svld1sh_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64index_s64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svint64_t { + svld1sw_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64index_u64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svuint64_t { + svld1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64index_u64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svuint64_t { + svld1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences 
and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svld1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svld1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] 
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active 
element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svint32_t { + svld1ub_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svint32_t { + svld1uh_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn 
svld1ub_gather_s32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8" + )] + fn _svld1ub_gather_s32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svld1ub_gather_s32offset_u32(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16" + )] + fn _svld1uh_gather_s32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_gather_s32offset_u32(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " 
* This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svint64_t { + svld1ub_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svint64_t { + svld1uh_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) 
-> svint64_t { + svld1uw_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i8" + )] + fn _svld1ub_gather_s64offset_u64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast::( + _svld1ub_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + 
#[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16" + )] + fn _svld1uh_gather_s64offset_u64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i32" + )] + fn _svld1uw_gather_s64offset_u64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svld1uw_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable 
= "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svint32_t { + svld1ub_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svint32_t { + svld1uh_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8" 
+ )] + fn _svld1ub_gather_u32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svld1ub_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16" + )] + fn _svld1uh_gather_u32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svint64_t { + svld1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svint64_t { + svld1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svint64_t { + svld1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svuint64_t { + svld1ub_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svuint64_t { + svld1uh_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svuint64_t { + svld1uw_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svld1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast 
(or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svld1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svld1ub_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svld1ub_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svld1uh_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_offset_s64( + 
pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svld1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svld1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn 
svld1uw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svld1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svld1ub_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast::( + _svld1ub_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] 
+#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svld1uh_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn 
_svld1uw_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svld1uw_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1ub_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1uh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1ub_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_u32(pg: svbool_t, bases: 
svuint32_t) -> svuint32_t { + svld1uh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1ub_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1uh_gather_u64base_offset_s64(pg, bases, 0) +} 
+#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1uw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1ub_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1uh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1uw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i8")] + fn _svld1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::simd_cast::( + _svld1ub_s16(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i8")] + fn _svld1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svld1ub_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i16")] + fn _svld1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i8")] + fn _svld1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast::( + _svld1ub_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i16")] + fn _svld1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i32")] + fn _svld1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svld1uw_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t { + svld1ub_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t { + svld1ub_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ld1h))] +pub unsafe fn svld1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t { + svld1uh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t { + svld1ub_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t { + svld1uh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t { + svld1uw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t { + svld1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t { + svld1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t { + svld1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t { + svld1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t { + svld1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t { + svld1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t { + svld1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t { + svld1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t { + svld1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t { + svld1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t { + svld1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t { + svld1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32index_s32( + pg: svbool_t, + base: *const u16, + indices: svint32_t, +) -> svint32_t { + svld1uh_gather_s32index_u32(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn 
svld1uh_gather_s32index_u32( + pg: svbool_t, + base: *const u16, + indices: svint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16" + )] + fn _svld1uh_gather_s32index_u32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_gather_s32index_u32(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64index_s64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svint64_t { + svld1uh_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64index_s64( + pg: 
svbool_t, + base: *const u32, + indices: svint64_t, +) -> svint64_t { + svld1uw_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64index_u64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i16" + )] + fn _svld1uh_gather_s64index_u64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64index_u64( + pg: svbool_t, + base: *const u32, + indices: 
svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i32" + )] + fn _svld1uw_gather_s64index_u64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svld1uw_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32index_s32( + pg: svbool_t, + base: *const u16, + indices: svuint32_t, +) -> svint32_t { + svld1uh_gather_u32index_u32(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32index_u32( + pg: svbool_t, + base: *const u16, + indices: svuint32_t, +) -> svuint32_t { + unsafe extern 
"unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16" + )] + fn _svld1uh_gather_u32index_u32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_gather_u32index_u32(pg.sve_into(), base.as_signed(), indices.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64index_s64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svint64_t { + svld1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64index_s64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svint64_t { + 
svld1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64index_u64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svuint64_t { + svld1uh_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64index_u64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svuint64_t { + svld1uw_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met 
for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svld1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svld1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) 
safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_f32])"] +#[doc = "## Safety"] 
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_f32(pg: svbool_t, base: *const f32) -> svfloat32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv4f32" + )] + fn _svld2_f32(pg: svbool4_t, base: *const f32) -> svfloat32x2_t; + } + _svld2_f32(pg.sve_into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_f64(pg: svbool_t, base: *const f64) -> svfloat64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv2f64" + )] + fn _svld2_f64(pg: svbool2_t, base: *const f64) -> svfloat64x2_t; + } + _svld2_f64(pg.sve_into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active 
element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_s8(pg: svbool_t, base: *const i8) -> svint8x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv16i8" + )] + fn _svld2_s8(pg: svbool_t, base: *const i8) -> svint8x2_t; + } + _svld2_s8(pg, base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_s16(pg: svbool_t, base: *const i16) -> svint16x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv8i16" + )] + fn _svld2_s16(pg: svbool8_t, base: *const i16) -> svint16x2_t; + } + _svld2_s16(pg.sve_into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_s32(pg: svbool_t, base: *const i32) -> svint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv4i32" + )] + fn _svld2_s32(pg: svbool4_t, base: *const i32) -> svint32x2_t; + } + _svld2_s32(pg.sve_into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_s64(pg: svbool_t, base: *const i64) -> svint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv2i64" + )] + fn _svld2_s64(pg: svbool2_t, base: *const i64) -> svint64x2_t; + } + _svld2_s64(pg.sve_into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2b))] +pub 
unsafe fn svld2_u8(pg: svbool_t, base: *const u8) -> svuint8x2_t { + svld2_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_u16(pg: svbool_t, base: *const u16) -> svuint16x2_t { + svld2_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_u32(pg: svbool_t, base: *const u32) -> svuint32x2_t { + svld2_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_u64(pg: svbool_t, base: *const u64) -> svuint64x2_t { + svld2_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x2_t { + svld2_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x2_t { + svld2_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x2_t { + svld2_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x2_t { + svld2_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x2_t { + svld2_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x2_t { + svld2_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x2_t { + svld2_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x2_t { + svld2_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x2_t { + svld2_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x2_t { + svld2_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_f32(pg: svbool_t, base: *const f32) -> svfloat32x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv4f32" + )] + fn _svld3_f32(pg: svbool4_t, base: *const f32) -> svfloat32x3_t; + } + _svld3_f32(pg.sve_into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_f64(pg: svbool_t, base: *const f64) -> svfloat64x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = 
"aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv2f64" + )] + fn _svld3_f64(pg: svbool2_t, base: *const f64) -> svfloat64x3_t; + } + _svld3_f64(pg.sve_into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_s8(pg: svbool_t, base: *const i8) -> svint8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv16i8" + )] + fn _svld3_s8(pg: svbool_t, base: *const i8) -> svint8x3_t; + } + _svld3_s8(pg, base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_s16(pg: svbool_t, base: *const i16) -> svint16x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv8i16" + )] + fn _svld3_s16(pg: svbool8_t, base: *const i16) -> svint16x3_t; + } + 
_svld3_s16(pg.sve_into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_s32(pg: svbool_t, base: *const i32) -> svint32x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv4i32" + )] + fn _svld3_s32(pg: svbool4_t, base: *const i32) -> svint32x3_t; + } + _svld3_s32(pg.sve_into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_s64(pg: svbool_t, base: *const i64) -> svint64x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv2i64" + )] + fn _svld3_s64(pg: svbool2_t, base: *const i64) -> svint64x3_t; + } + _svld3_s64(pg.sve_into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_u8(pg: svbool_t, base: *const u8) -> svuint8x3_t { + svld3_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_u16(pg: svbool_t, base: *const u16) -> svuint16x3_t { + svld3_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_u32(pg: svbool_t, base: *const u32) -> svuint32x3_t { + svld3_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_u64(pg: svbool_t, base: *const u64) -> svuint64x3_t { + svld3_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x3_t { + svld3_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be 
met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x3_t { + svld3_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x3_t { + svld3_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> 
svint16x3_t { + svld3_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x3_t { + svld3_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x3_t { + svld3_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element 
(governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x3_t { + svld3_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x3_t { + svld3_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x3_t { + svld3_u32(pg, base.offset(svcntw() as 
isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x3_t { + svld3_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_f32(pg: svbool_t, base: *const f32) -> svfloat32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv4f32" + )] + fn _svld4_f32(pg: svbool4_t, base: *const f32) -> svfloat32x4_t; + } + _svld4_f32(pg.sve_into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) 
safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_f64(pg: svbool_t, base: *const f64) -> svfloat64x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv2f64" + )] + fn _svld4_f64(pg: svbool2_t, base: *const f64) -> svfloat64x4_t; + } + _svld4_f64(pg.sve_into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_s8(pg: svbool_t, base: *const i8) -> svint8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv16i8" + )] + fn _svld4_s8(pg: svbool_t, base: *const i8) -> svint8x4_t; + } + _svld4_s8(pg, base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the 
calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_s16(pg: svbool_t, base: *const i16) -> svint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv8i16" + )] + fn _svld4_s16(pg: svbool8_t, base: *const i16) -> svint16x4_t; + } + _svld4_s16(pg.sve_into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_s32(pg: svbool_t, base: *const i32) -> svint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv4i32" + )] + fn _svld4_s32(pg: svbool4_t, base: *const i32) -> svint32x4_t; + } + _svld4_s32(pg.sve_into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_s64(pg: svbool_t, base: *const i64) -> svint64x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv2i64" + )] + fn _svld4_s64(pg: svbool2_t, base: *const i64) -> svint64x4_t; + } + _svld4_s64(pg.sve_into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_u8(pg: svbool_t, base: *const u8) -> svuint8x4_t { + svld4_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_u16(pg: svbool_t, base: *const u16) -> svuint16x4_t { + svld4_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_u32(pg: svbool_t, base: *const u32) -> svuint32x4_t { + svld4_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_u64(pg: svbool_t, base: *const u64) -> svuint64x4_t { + svld4_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x4_t { + svld4_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x4_t { + svld4_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x4_t { + svld4_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x4_t { + svld4_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x4_t { + svld4_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x4_t { + svld4_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x4_t { + svld4_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x4_t { + svld4_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x4_t { + svld4_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x4_t { + svld4_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting 
behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4f32")] + fn _svldff1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svldff1_f32(pg.sve_into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2f64")] + fn _svldff1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svldff1_f64(pg.sve_into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv16i8")] + fn _svldff1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svldff1_s8(pg, base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i16")] + fn _svldff1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svldff1_s16(pg.sve_into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i32")] + fn _svldff1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svldff1_s32(pg.sve_into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i64")] + fn _svldff1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svldff1_s64(pg.sve_into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svldff1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svldff1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svldff1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svldff1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32index_f32( + pg: svbool_t, + base: *const f32, + indices: svint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4f32" + )] + fn _svldff1_gather_s32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svldff1_gather_s32index_f32(pg.sve_into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32index_s32( + pg: svbool_t, + base: *const i32, + indices: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i32" + )] + fn _svldff1_gather_s32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svldff1_gather_s32index_s32(pg.sve_into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+// A u32 gather loads the same bits as an i32 gather, so delegate to the
+// signed-element variant and reinterpret the base pointer and the result.
+pub unsafe fn svldff1_gather_s32index_u32(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svint32_t,
+) -> svuint32_t {
+    svldff1_gather_s32index_s32(pg, base.as_signed(), indices).as_unsigned()
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_f64])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_s64index_f64(
+    pg: svbool_t,
+    base: *const f64,
+    indices: svint64_t,
+) -> svfloat64_t {
+    // Direct binding to the LLVM SVE first-faulting gather intrinsic; `pg` is
+    // converted to the 2-lane predicate layout it expects via `sve_into`.
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2f64"
+        )]
+        fn _svldff1_gather_s64index_f64(
+            pg: svbool2_t,
+            base: *const f64,
+            indices: svint64_t,
+        ) -> svfloat64_t;
+    }
+    _svldff1_gather_s64index_f64(pg.sve_into(), base, indices)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_s64])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_s64index_s64(
+    pg: svbool_t,
+    base: *const i64,
+    indices: svint64_t,
+) -> svint64_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i64"
+        )]
+        fn _svldff1_gather_s64index_s64(
+            pg: svbool2_t,
+            base: *const i64,
+            indices: svint64_t,
+        ) -> svint64_t;
+    }
+    _svldff1_gather_s64index_s64(pg.sve_into(), base, indices)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_u64])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+// u64 loads are bit-identical to i64 loads: reuse the signed-element variant.
+pub unsafe fn svldff1_gather_s64index_u64(
+    pg: svbool_t,
+    base: *const u64,
+    indices: svint64_t,
+) -> svuint64_t {
+    svldff1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned()
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_f32])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32index_f32(
+    pg: svbool_t,
+    base: *const f32,
+    indices: svuint32_t,
+) -> svfloat32_t {
+    // `.uxtw.` form of the intrinsic: each 32-bit index is treated as
+    // unsigned (zero-extended) during address calculation.
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4f32"
+        )]
+        fn _svldff1_gather_u32index_f32(
+            pg: svbool4_t,
+            base: *const f32,
+            indices: svint32_t,
+        ) -> svfloat32_t;
+    }
+    _svldff1_gather_u32index_f32(pg.sve_into(), base, indices.as_signed())
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_s32])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32index_s32(
+    pg: svbool_t,
+    base: *const i32,
+    indices: svuint32_t,
+) -> svint32_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i32"
+        )]
+        fn _svldff1_gather_u32index_s32(
+            pg: svbool4_t,
+            base: *const i32,
+            indices: svint32_t,
+        ) -> svint32_t;
+    }
+    _svldff1_gather_u32index_s32(pg.sve_into(), base, indices.as_signed())
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_u32])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+// Bit-identical to the signed-element load; reinterpret pointer and result.
+pub unsafe fn svldff1_gather_u32index_u32(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svuint32_t,
+) -> svuint32_t {
+    svldff1_gather_u32index_s32(pg, base.as_signed(), indices).as_unsigned()
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_f64])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+// 64-bit indices are used as-is (no sign/zero extension step), so the signed-
+// and unsigned-index forms share the same underlying intrinsic.
+pub unsafe fn svldff1_gather_u64index_f64(
+    pg: svbool_t,
+    base: *const f64,
+    indices: svuint64_t,
+) -> svfloat64_t {
+    svldff1_gather_s64index_f64(pg, base, indices.as_signed())
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_s64])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64index_s64(
+    pg: svbool_t,
+    base: *const i64,
+    indices: svuint64_t,
+) -> svint64_t {
+    svldff1_gather_s64index_s64(pg, base, indices.as_signed())
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_u64])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64index_u64(
+    pg: svbool_t,
+    base: *const u64,
+    indices: svuint64_t,
+) -> svuint64_t {
+    svldff1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned()
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_f32])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_s32offset_f32(
+    pg: svbool_t,
+    base: *const f32,
+    offsets: svint32_t,
+) -> svfloat32_t {
+    // `offset` family: per-lane *byte* offsets (unlike the `index` family,
+    // which scales by element size). `.sxtw.` = 32-bit offsets sign-extended.
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4f32"
+        )]
+        fn _svldff1_gather_s32offset_f32(
+            pg: svbool4_t,
+            base: *const f32,
+            offsets: svint32_t,
+        ) -> svfloat32_t;
+    }
+    _svldff1_gather_s32offset_f32(pg.sve_into(), base, offsets)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_s32])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_s32offset_s32(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svint32_t,
+) -> svint32_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i32"
+        )]
+        fn _svldff1_gather_s32offset_s32(
+            pg: svbool4_t,
+            base: *const i32,
+            offsets: svint32_t,
+        ) -> svint32_t;
+    }
+    _svldff1_gather_s32offset_s32(pg.sve_into(), base, offsets)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_u32])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+// Bit-identical to the signed-element load; reinterpret pointer and result.
+pub unsafe fn svldff1_gather_s32offset_u32(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svint32_t,
+) -> svuint32_t {
+    svldff1_gather_s32offset_s32(pg, base.as_signed(), offsets).as_unsigned()
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_f64])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_s64offset_f64(
+    pg: svbool_t,
+    base: *const f64,
+    offsets: svint64_t,
+) -> svfloat64_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2f64"
+        )]
+        fn _svldff1_gather_s64offset_f64(
+            pg: svbool2_t,
+            base: *const f64,
+            offsets: svint64_t,
+        ) -> svfloat64_t;
+    }
+    _svldff1_gather_s64offset_f64(pg.sve_into(), base, offsets)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_s64])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const i64,
+    offsets: svint64_t,
+) -> svint64_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i64"
+        )]
+        fn _svldff1_gather_s64offset_s64(
+            pg: svbool2_t,
+            base: *const i64,
+            offsets: svint64_t,
+        ) -> svint64_t;
+    }
+    _svldff1_gather_s64offset_s64(pg.sve_into(), base, offsets)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_u64])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+// u64 loads are bit-identical to i64 loads: reuse the signed-element variant.
+pub unsafe fn svldff1_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const u64,
+    offsets: svint64_t,
+) -> svuint64_t {
+    svldff1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned()
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_f32])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32offset_f32(
+    pg: svbool_t,
+    base: *const f32,
+    offsets: svuint32_t,
+) -> svfloat32_t {
+    // `.uxtw.` form: each 32-bit byte offset is treated as unsigned
+    // (zero-extended) during address calculation.
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4f32"
+        )]
+        fn _svldff1_gather_u32offset_f32(
+            pg: svbool4_t,
+            base: *const f32,
+            offsets: svint32_t,
+        ) -> svfloat32_t;
+    }
+    _svldff1_gather_u32offset_f32(pg.sve_into(), base, offsets.as_signed())
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_s32])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svuint32_t,
+) -> svint32_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i32"
+        )]
+        fn _svldff1_gather_u32offset_s32(
+            pg: svbool4_t,
+            base: *const i32,
+            offsets: svint32_t,
+        ) -> svint32_t;
+    }
+    _svldff1_gather_u32offset_s32(pg.sve_into(), base, offsets.as_signed())
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_u32])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+// Bit-identical to the signed-element load; reinterpret pointer and result.
+pub unsafe fn svldff1_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    svldff1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned()
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_f64])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+// 64-bit offsets are used as-is (no sign/zero extension step), so the signed-
+// and unsigned-offset forms share the same underlying intrinsic.
+pub unsafe fn svldff1_gather_u64offset_f64(
+    pg: svbool_t,
+    base: *const f64,
+    offsets: svuint64_t,
+) -> svfloat64_t {
+    svldff1_gather_s64offset_f64(pg, base, offsets.as_signed())
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_s64])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const i64,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svldff1_gather_s64offset_s64(pg, base, offsets.as_signed())
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_u64])"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] 
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const u64,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svldff1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned()
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_f32)"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+// Gather from the per-lane absolute addresses in `bases`: equivalent to the
+// `_offset` form with a zero byte offset.
+pub unsafe fn svldff1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t {
+    svldff1_gather_u32base_offset_f32(pg, bases, 0)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_s32)"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
+    svldff1_gather_u32base_offset_s32(pg, bases, 0)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_u32)"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
+    svldff1_gather_u32base_offset_u32(pg, bases, 0)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_f64)"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t {
+    svldff1_gather_u64base_offset_f64(pg, bases, 0)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_s64)"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svldff1_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_u64)"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldff1_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_f32)"]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svfloat32_t { + svldff1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldff1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldff1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svfloat64_t { + svldff1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svldff1_gather_u32base_offset_f32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svfloat32_t; + } + _svldff1_gather_u32base_offset_f32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svldff1_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svint32_t; + } + _svldff1_gather_u32base_offset_s32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldff1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svldff1_gather_u64base_offset_f64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svfloat64_t; + } + _svldff1_gather_u64base_offset_f64(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svldff1_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svint64_t; + } + _svldff1_gather_u64base_offset_s64(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svldff1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svldff1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svldff1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svldff1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svldff1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svldff1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svldff1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svldff1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svldff1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svldff1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8" + )] + fn _svldff1sb_gather_s32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svldff1sb_gather_s32offset_s32( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16" + )] + fn _svldff1sh_gather_s32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_gather_s32offset_s32( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svuint32_t { + svldff1sb_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svuint32_t { + svldff1sh_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i8" + )] + fn _svldff1sb_gather_s64offset_s64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast(_svldff1sb_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i16" + )] + fn _svldff1sh_gather_s64offset_s64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i32" + )] + fn _svldff1sw_gather_s64offset_s64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svldff1sw_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svuint64_t { + svldff1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svuint64_t { + svldff1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svuint64_t { + svldff1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8" + )] + fn _svldff1sb_gather_u32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svldff1sb_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16" + )] + fn _svldff1sh_gather_u32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svuint32_t { + svldff1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svuint32_t { + svldff1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svint64_t { + svldff1sb_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svint64_t { + svldff1sh_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svint64_t { + svldff1sw_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svuint64_t { + svldff1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svuint64_t { + svldff1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svuint64_t { + svldff1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldff1sb_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svldff1sb_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldff1sh_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldff1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldff1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldff1sb_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast(_svldff1sb_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldff1sh_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldff1sw_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svldff1sw_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1sb_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1sh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1sb_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1sh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1sb_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1sh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1sw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1sb_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1sh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1sw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i8")] + fn _svldff1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::simd_cast(_svldff1sb_s16(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i8")] + fn _svldff1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svldff1sb_s32(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i16")] + fn _svldff1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_s32(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i8")] + fn _svldff1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast(_svldff1sb_s64(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i16")] + fn _svldff1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_s64(pg.sve_into(), base)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i32")] + fn _svldff1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svldff1sw_s64(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { + svldff1sb_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t { + svldff1sb_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { + svldff1sh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { + svldff1sb_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { + svldff1sh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { + svldff1sw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t { + svldff1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t { + svldff1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t { + svldff1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t { + svldff1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t { + svldff1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t { + svldff1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t { + svldff1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t { + svldff1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t { + svldff1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t { + svldff1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t { + svldff1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t { + svldff1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32index_s32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16" + )] + fn _svldff1sh_gather_s32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_gather_s32index_s32(pg.sve_into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32index_u32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svuint32_t { + svldff1sh_gather_s32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64index_s64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i16" + )] + fn _svldff1sh_gather_s64index_s64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_gather_s64index_s64(pg.sve_into(), base, indices)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64index_s64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i32" + )] + fn _svldff1sw_gather_s64index_s64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svldff1sw_gather_s64index_s64(pg.sve_into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64index_u64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svuint64_t { + svldff1sh_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64index_u64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svuint64_t { + svldff1sw_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32index_s32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16" + )] + fn _svldff1sh_gather_u32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_gather_u32index_s32( + pg.sve_into(), + base, + indices.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32index_u32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svuint32_t { + svldff1sh_gather_u32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64index_s64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svint64_t { + svldff1sh_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64index_s64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svint64_t { + svldff1sw_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64index_u64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svuint64_t { + svldff1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64index_u64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svuint64_t { + svldff1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldff1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldff1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_s32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svint32_t { + svldff1ub_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svint32_t { + svldff1uh_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_s32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8" + )] + fn _svldff1ub_gather_s32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svldff1ub_gather_s32offset_u32(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16" + )] + fn _svldff1uh_gather_s32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_gather_s32offset_u32(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_s64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svint64_t { + svldff1ub_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svint64_t { + svldff1uh_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_s64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svint64_t { + svldff1uw_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_s64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i8" + )] + fn _svldff1ub_gather_s64offset_u64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast::( + _svldff1ub_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i16" + )] + fn _svldff1uh_gather_s64offset_u64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_s64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i32" + )] + fn _svldff1uw_gather_s64offset_u64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uw_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svint32_t { + svldff1ub_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svint32_t { + svldff1uh_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8" + )] + fn _svldff1ub_gather_u32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svldff1ub_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16" + )] + fn _svldff1uh_gather_u32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svint64_t { + svldff1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svint64_t { + svldff1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svint64_t { + svldff1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svuint64_t { + svldff1ub_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svuint64_t { + svldff1uh_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svuint64_t { + svldff1uw_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldff1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldff1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldff1ub_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svldff1ub_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldff1uh_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldff1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldff1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldff1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldff1ub_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast::( + _svldff1ub_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldff1uh_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldff1uw_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uw_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1ub_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1uh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1ub_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1uh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1ub_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1uh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1uw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1ub_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1uh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1uw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i8")] + fn _svldff1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::simd_cast::( + _svldff1ub_s16(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i8")] + fn _svldff1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svldff1ub_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i16")] + fn _svldff1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i8")] + fn _svldff1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast::( + _svldff1ub_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i16")] + fn _svldff1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i32")] + fn _svldff1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uw_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t { + svldff1ub_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t { + svldff1ub_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t { + svldff1uh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t { + svldff1ub_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t { + svldff1uh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t { + svldff1uw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t { + svldff1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t { + svldff1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t { + svldff1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t { + svldff1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t { + svldff1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t { + svldff1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t { + svldff1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t { + svldff1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t { + svldff1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t { + svldff1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t { + svldff1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t { + svldff1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s32index_s32( + pg: svbool_t, + base: *const u16, + indices: svint32_t, +) -> svint32_t { + svldff1uh_gather_s32index_u32(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s32index_u32( + pg: svbool_t, + base: *const u16, + indices: svint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16" + )] + fn _svldff1uh_gather_s32index_u32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_gather_s32index_u32(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s64index_s64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svint64_t { + svldff1uh_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_s64index_s64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svint64_t { + svldff1uw_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s64index_u64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i16" + )] + fn _svldff1uh_gather_s64index_u64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_s64index_u64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i32" + )] + fn _svldff1uw_gather_s64index_u64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uw_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32index_s32( + pg: svbool_t, + base: *const u16, + indices: svuint32_t, +) -> svint32_t { + svldff1uh_gather_u32index_u32(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32index_u32( + pg: svbool_t, + base: *const u16, + indices: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16" + )] + fn _svldff1uh_gather_u32index_u32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_gather_u32index_u32(pg.sve_into(), base.as_signed(), indices.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64index_s64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svint64_t { + svldff1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64index_s64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svint64_t { + svldff1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64index_u64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svuint64_t { + svldff1uh_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64index_u64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svuint64_t { + svldff1uw_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldff1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldff1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4f32")] + fn _svldnf1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svldnf1_f32(pg.sve_into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2f64")] + fn _svldnf1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svldnf1_f64(pg.sve_into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv16i8")] + fn _svldnf1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svldnf1_s8(pg, base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i16")] + fn _svldnf1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svldnf1_s16(pg.sve_into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i32")] + fn _svldnf1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svldnf1_s32(pg.sve_into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i64")] + fn _svldnf1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svldnf1_s64(pg.sve_into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svldnf1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svldnf1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svldnf1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svldnf1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svldnf1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svldnf1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svldnf1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svldnf1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svldnf1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svldnf1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svldnf1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svldnf1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svldnf1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svldnf1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i8")] + fn _svldnf1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::simd_cast(_svldnf1sb_s16(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i8")] + fn _svldnf1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svldnf1sb_s32(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i16")] + fn _svldnf1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldnf1sh_s32(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i8")] + fn _svldnf1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast(_svldnf1sb_s64(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i16")] + fn _svldnf1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svldnf1sh_s64(pg.sve_into(), base)) +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i32")] + fn _svldnf1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svldnf1sw_s64(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { + svldnf1sb_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t { + svldnf1sb_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { + svldnf1sh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { + svldnf1sb_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { + svldnf1sh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { + svldnf1sw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t { + svldnf1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t { + svldnf1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t { + svldnf1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t { + svldnf1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t { + svldnf1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t { + svldnf1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t { + svldnf1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t { + svldnf1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t { + svldnf1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t { + svldnf1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t { + svldnf1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t { + svldnf1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i8")] + fn _svldnf1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::simd_cast::( + _svldnf1ub_s16(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i8")] + fn _svldnf1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svldnf1ub_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i16")] + fn _svldnf1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldnf1uh_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i8")] + fn _svldnf1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast::( + _svldnf1ub_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i16")] + fn _svldnf1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svldnf1uh_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i32")] + fn _svldnf1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svldnf1uw_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t { + svldnf1ub_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t { + svldnf1ub_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t { + svldnf1uh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t { + svldnf1ub_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t { + svldnf1uh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t { + svldnf1uw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t { + svldnf1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t { + svldnf1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t { + svldnf1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t { + svldnf1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t { + svldnf1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t { + svldnf1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t { + svldnf1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t { + svldnf1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t { + svldnf1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t { + svldnf1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t { + svldnf1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t { + svldnf1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv4f32")] + fn _svldnt1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svldnt1_f32(pg.sve_into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] 
+#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv2f64")] + fn _svldnt1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svldnt1_f64(pg.sve_into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv16i8")] + fn _svldnt1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svldnt1_s8(pg, base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv8i16")] + fn _svldnt1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svldnt1_s16(pg.sve_into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn 
svldnt1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv4i32")] + fn _svldnt1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svldnt1_s32(pg.sve_into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv2i64")] + fn _svldnt1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svldnt1_s64(pg.sve_into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may 
be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svldnt1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svldnt1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required 
for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svldnt1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svldnt1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svldnt1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svldnt1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svldnt1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svldnt1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svldnt1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svldnt1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svldnt1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svldnt1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svldnt1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svldnt1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svlen_f32(_op: svfloat32_t) -> u64 { + svcntw() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svlen_f64(_op: svfloat64_t) -> u64 { + svcntd() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rdvl))] +pub fn svlen_s8(_op: svint8_t) -> u64 { + svcntb() +} +#[doc = "Count the number of elements in a 
full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnth))] +pub fn svlen_s16(_op: svint16_t) -> u64 { + svcnth() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svlen_s32(_op: svint32_t) -> u64 { + svcntw() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svlen_s64(_op: svint64_t) -> u64 { + svcntd() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rdvl))] +pub fn svlen_u8(_op: svuint8_t) -> u64 { + svcntb() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnth))] +pub fn svlen_u16(_op: svuint16_t) -> u64 { + svcnth() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svlen_u32(_op: svuint32_t) -> u64 { + svcntw() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svlen_u64(_op: svuint64_t) -> u64 { + svcntd() +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv16i8")] + fn _svlsl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svlsl_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svlsl_s8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue 
= "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svlsl_s8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svlsl_s8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svlsl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svlsl_s8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv8i16")] + fn _svlsl_s16_m(pg: svbool8_t, op1: svint16_t, op2: 
svint16_t) -> svint16_t; + } + unsafe { _svlsl_s16_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svlsl_s16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svlsl_s16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svlsl_s16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svlsl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svlsl_s16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv4i32")] + fn _svlsl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svlsl_s32_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svlsl_s32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svlsl_s32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svlsl_s32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svlsl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svlsl_s32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv2i64")] + fn _svlsl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svlsl_s64_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(lsl))] +pub fn svlsl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svlsl_s64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svlsl_s64_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svlsl_s64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svlsl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svlsl_s64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svlsl_s8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsl_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsl_u8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsl_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u8_z(pg: svbool_t, op1: 
svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsl_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svlsl_s16_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsl_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsl_u16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_x)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsl_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsl_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svlsl_s32_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsl_u32_m(pg, 
op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsl_u32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsl_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsl_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svlsl_s64_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsl_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsl_u64_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsl_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsl_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsl.wide.nxv16i8" + )] + fn _svlsl_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t; + } + unsafe { _svlsl_wide_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svlsl_wide_s8_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svlsl_wide_s8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svlsl_wide_s8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svlsl_wide_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svlsl_wide_s8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsl.wide.nxv8i16" + )] + fn _svlsl_wide_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t; + } + unsafe { _svlsl_wide_s16_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svlsl_wide_s16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svlsl_wide_s16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svlsl_wide_s16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svlsl_wide_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svlsl_wide_s16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsl.wide.nxv4i32" + )] + fn _svlsl_wide_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svlsl_wide_s32_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svlsl_wide_s32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svlsl_wide_s32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svlsl_wide_s32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svlsl_wide_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svlsl_wide_s32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + unsafe { svlsl_wide_s8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsl_wide_u8_m(pg, op1, 
svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsl_wide_u8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsl_wide_u8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsl_wide_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsl_wide_u8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + unsafe { svlsl_wide_s16_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsl_wide_u16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsl_wide_u16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsl_wide_u16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + 
svlsl_wide_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsl_wide_u16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + unsafe { svlsl_wide_s32_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsl_wide_u32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsl_wide_u32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsl_wide_u32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsl_wide_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsl_wide_u32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv16i8")] + fn _svlsr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svlsr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsr_u8_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u8_z(pg: 
svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv8i16")] + fn _svlsr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svlsr_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsr_u16_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> 
svuint16_t { + svlsr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv4i32")] + fn _svlsr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svlsr_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t 
{ + svlsr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsr_u32_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv2i64")] + fn _svlsr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svlsr_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsr_u64_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsr.wide.nxv16i8" + )] + fn _svlsr_wide_u8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t; + } + unsafe { _svlsr_wide_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsr_wide_u8_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsr_wide_u8_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsr_wide_u8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsr_wide_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsr_wide_u8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = 
"aarch64", + link_name = "llvm.aarch64.sve.lsr.wide.nxv8i16" + )] + fn _svlsr_wide_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t; + } + unsafe { _svlsr_wide_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsr_wide_u16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsr_wide_u16_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsr_wide_u16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsr_wide_u16_m(pg, 
svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsr_wide_u16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsr.wide.nxv4i32" + )] + fn _svlsr_wide_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svlsr_wide_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsr_wide_u32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u32_x(pg: svbool_t, op1: 
svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsr_wide_u32_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsr_wide_u32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsr_wide_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsr_wide_u32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmad.nxv4f32")] + fn 
_svmad_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svmad_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmad_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmad_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmad_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, 
+) -> svfloat32_t { + svmad_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmad_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmad.nxv2f64")] + fn _svmad_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmad_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmad_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmad))]
pub fn svmad_f64_x(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    svmad_f64_m(pg, op1, op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmad))]
pub fn svmad_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    svmad_f64_x(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmad))]
pub fn svmad_f64_z(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    svmad_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmad))]
pub fn svmad_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    svmad_f64_z(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv16i8")]
        fn _svmad_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    unsafe { _svmad_s8_m(pg, op1, op2, op3) }
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    svmad_s8_m(pg, op1, op2, svdup_n_s8(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    svmad_s8_m(pg, op1, op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    svmad_s8_x(pg, op1, op2, svdup_n_s8(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    svmad_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    svmad_s8_z(pg, op1, op2, svdup_n_s8(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv8i16")]
        fn _svmad_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t)
            -> svint16_t;
    }
    unsafe { _svmad_s16_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    svmad_s16_m(pg, op1, op2, svdup_n_s16(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    svmad_s16_m(pg, op1, op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    svmad_s16_x(pg, op1, op2, svdup_n_s16(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    svmad_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    svmad_s16_z(pg, op1, op2, svdup_n_s16(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv4i32")]
        fn _svmad_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t)
            -> svint32_t;
    }
    unsafe { _svmad_s32_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    svmad_s32_m(pg, op1, op2, svdup_n_s32(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    svmad_s32_m(pg, op1, op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    svmad_s32_x(pg, op1, op2, svdup_n_s32(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmad_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmad_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv2i64")] + fn _svmad_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmad_s64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmad_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_x)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmad_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmad_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmad_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmad_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub 
fn svmad_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmad_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmad_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmad_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmad_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmad_u8_m(pg, svsel_u8(pg, op1, 
svdup_n_u8(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmad_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmad_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmad_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmad_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmad_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmad_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmad_u16_z(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmad_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmad_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmad_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmad_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmad_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmad_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmad_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmad_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmad_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmad_u64_x(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmad_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmad_u64_z(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmax.nxv4f32")] + fn _svmax_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmax_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmax_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmax_f32_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmax_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmax_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmax_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmax.nxv2f64")] + fn _svmax_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmax_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmax_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmax_f64_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmax_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_z)"] 
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn svmax_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    svmax_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn svmax_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svmax_f64_z(pg, op1, svdup_n_f64(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv16i8")]
        fn _svmax_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _svmax_s8_m(pg, op1, op2) }
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svmax_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    svmax_s8_m(pg, op1, op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svmax_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    svmax_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svmax_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv8i16")]
        fn _svmax_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svmax_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svmax_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    svmax_s16_m(pg, op1, op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svmax_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    svmax_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svmax_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv4i32")]
        fn _svmax_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svmax_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svmax_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    svmax_s32_m(pg, op1, op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svmax_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    svmax_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svmax_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv2i64")]
        fn _svmax_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svmax_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svmax_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmax_s64_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmax_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmax_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmax_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) 
-> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv16i8")] + fn _svmax_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmax_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmax_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmax_u8_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmax_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmax_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmax_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv8i16")] + fn _svmax_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmax_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmax_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmax_u16_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmax_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmax_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmax_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv4i32")] + fn _svmax_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmax_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmax_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmax_u32_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmax_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmax_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u32_z(pg: svbool_t, op1: svuint32_t, 
op2: u32) -> svuint32_t { + svmax_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv2i64")] + fn _svmax_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmax_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmax_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmax_u64_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmax_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = 
"Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmax_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmax_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxnm.nxv4f32")] + fn _svmaxnm_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmaxnm_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmaxnm_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxnm_f32_m(pg, op1, op2) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmaxnm_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxnm_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmaxnm_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(fmaxnm))] +pub fn svmaxnm_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxnm.nxv2f64")] + fn _svmaxnm_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmaxnm_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmaxnm_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxnm_f64_m(pg, op1, op2) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmaxnm_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn 
svmaxnm_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxnm_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmaxnm_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum number reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmv[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmv))] +pub fn svmaxnmv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmv.nxv4f32" + )] + fn _svmaxnmv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svmaxnmv_f32(pg.sve_into(), op) } +} +#[doc = "Maximum number reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmv[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmv))] +pub fn svmaxnmv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmv.nxv2f64" + )] + fn _svmaxnmv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svmaxnmv_f64(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_f32])"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxv))] +pub fn svmaxv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxv.nxv4f32")] + fn _svmaxv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svmaxv_f32(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxv))] +pub fn svmaxv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxv.nxv2f64")] + fn _svmaxv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svmaxv_f64(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv16i8")] + fn _svmaxv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svmaxv_s8(pg, op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern 
"unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv8i16")] + fn _svmaxv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svmaxv_s16(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv4i32")] + fn _svmaxv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svmaxv_s32(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv2i64")] + fn _svmaxv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svmaxv_s64(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv16i8")] + fn _svmaxv_u8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svmaxv_u8(pg, op.as_signed()).as_unsigned() } +} +#[doc 
= "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv8i16")] + fn _svmaxv_u16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svmaxv_u16(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv4i32")] + fn _svmaxv_u32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svmaxv_u32(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv2i64")] + fn _svmaxv_u64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svmaxv_u64(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmin.nxv4f32")] + fn _svmin_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmin_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmin_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmin_f32_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmin_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmin_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmin_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmin.nxv2f64")] + fn _svmin_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmin_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmin_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f64_x(pg: 
svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmin_f64_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmin_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmin_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmin_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv16i8")] + fn _svmin_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmin_s8_m(pg, op1, op2) } +} +#[doc = 
"Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmin_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmin_s8_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmin_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmin_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> 
svint8_t { + svmin_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv8i16")] + fn _svmin_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmin_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmin_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmin_s16_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmin_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmin_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmin_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv4i32")] + fn _svmin_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmin_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmin_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_x)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmin_s32_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmin_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmin_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmin_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.smin.nxv2i64")] + fn _svmin_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmin_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmin_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmin_s64_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmin_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmin_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_z)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmin_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv16i8")] + fn _svmin_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmin_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmin_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmin_u8_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmin_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmin_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmin_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv8i16")] + fn _svmin_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmin_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u16_m(pg: svbool_t, 
op1: svuint16_t, op2: u16) -> svuint16_t { + svmin_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmin_u16_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmin_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmin_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmin_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv4i32")] + fn _svmin_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmin_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmin_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmin_u32_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmin_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] 
+pub fn svmin_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmin_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmin_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv2i64")] + fn _svmin_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmin_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmin_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u64_x(pg: svbool_t, op1: svuint64_t, 
op2: svuint64_t) -> svuint64_t { + svmin_u64_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmin_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmin_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmin_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminnm.nxv4f32")] + fn _svminnm_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svminnm_f32_m(pg.sve_into(), op1, op2) } +} 
+#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svminnm_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminnm_f32_m(pg, op1, op2) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svminnm_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminnm_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svminnm_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminnm.nxv2f64")] + fn _svminnm_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svminnm_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svminnm_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminnm_f64_m(pg, op1, op2) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svminnm_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminnm_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svminnm_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum number reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmv[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnmv))] +pub fn svminnmv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmv.nxv4f32" + )] + fn _svminnmv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svminnmv_f32(pg.sve_into(), op) } +} +#[doc = "Minimum number reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmv[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(fminnmv))] +pub fn svminnmv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmv.nxv2f64" + )] + fn _svminnmv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svminnmv_f64(pg.sve_into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminv))] +pub fn svminv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminv.nxv4f32")] + fn _svminv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svminv_f32(pg.sve_into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminv))] +pub fn svminv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminv.nxv2f64")] + fn _svminv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svminv_f64(pg.sve_into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminv))] +pub fn svminv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv16i8")] 
+        fn _svminv_s8(pg: svbool_t, op: svint8_t) -> i8;
+    }
+    // 8-bit elements use the full 16-lane predicate, so `pg` is passed through
+    // without a sve_into() conversion.
+    unsafe { _svminv_s8(pg, op) }
+}
+#[doc = "Minimum reduction to scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s16])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sminv))]
+pub fn svminv_s16(pg: svbool_t, op: svint16_t) -> i16 {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv8i16")]
+        fn _svminv_s16(pg: svbool8_t, op: svint16_t) -> i16;
+    }
+    unsafe { _svminv_s16(pg.sve_into(), op) }
+}
+#[doc = "Minimum reduction to scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sminv))]
+pub fn svminv_s32(pg: svbool_t, op: svint32_t) -> i32 {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv4i32")]
+        fn _svminv_s32(pg: svbool4_t, op: svint32_t) -> i32;
+    }
+    unsafe { _svminv_s32(pg.sve_into(), op) }
+}
+#[doc = "Minimum reduction to scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sminv))]
+pub fn svminv_s64(pg: svbool_t, op: svint64_t) -> i64 {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv2i64")]
+        fn _svminv_s64(pg: svbool2_t, op: svint64_t) -> i64;
+    }
+    unsafe { _svminv_s64(pg.sve_into(), op) }
+}
+#[doc = "Minimum reduction to scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u8])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uminv))]
+// Unsigned variants reuse the signed FFI declaration shape: operands are
+// bit-cast with as_signed()/as_unsigned() around an unsigned-min intrinsic.
+pub fn svminv_u8(pg: svbool_t, op: svuint8_t) -> u8 {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv16i8")]
+        fn _svminv_u8(pg: svbool_t, op: svint8_t) -> i8;
+    }
+    unsafe { _svminv_u8(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Minimum reduction to scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u16])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uminv))]
+pub fn svminv_u16(pg: svbool_t, op: svuint16_t) -> u16 {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv8i16")]
+        fn _svminv_u16(pg: svbool8_t, op: svint16_t) -> i16;
+    }
+    unsafe { _svminv_u16(pg.sve_into(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Minimum reduction to scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u32])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uminv))]
+pub fn svminv_u32(pg: svbool_t, op: svuint32_t) -> u32 {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv4i32")]
+        fn _svminv_u32(pg: svbool4_t, op: svint32_t) -> i32;
+    }
+    unsafe { _svminv_u32(pg.sve_into(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Minimum reduction to scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u64])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uminv))]
+pub fn svminv_u64(pg: svbool_t, op: svuint64_t) -> u64 {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv2i64")]
+        fn _svminv_u64(pg: svbool2_t, op: svint64_t) -> i64;
+    }
+    unsafe { _svminv_u64(pg.sve_into(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmla))]
+// _m ("merging") form: inactive lanes keep the value of op1.
+pub fn svmla_f32_m(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmla.nxv4f32")]
+        fn _svmla_f32_m(
+            pg: svbool4_t,
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svmla_f32_m(pg.sve_into(), op1, op2, op3) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmla))]
+// _n form: broadcasts the scalar op3 and forwards to the vector variant.
+pub fn svmla_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmla_f32_m(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmla))]
+// _x ("don't care") form: implemented here as the merging form.
+pub fn svmla_f32_x(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svmla_f32_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmla_f32_x(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmla))]
+// _z ("zeroing") form: inactive lanes of op1 are zeroed via svsel before the merge.
+pub fn svmla_f32_z(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svmla_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmla_f32_z(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_f64_m(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmla.nxv2f64")]
+        fn _svmla_f64_m(
+            pg: svbool2_t,
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svmla_f64_m(pg.sve_into(), op1, op2, op3) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svmla_f64_m(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_f64_x(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svmla_f64_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svmla_f64_x(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_f64_z(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svmla_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svmla_f64_z(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv16i8")]
+        fn _svmla_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
+    }
+    unsafe { _svmla_s8_m(pg, op1, op2, op3) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svmla_s8_m(pg, op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test,
assert_instr(mla))]
+pub fn svmla_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    svmla_s8_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svmla_s8_x(pg, op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+// _z form: zero the inactive lanes of op1 first, then reuse the merging form.
+pub fn svmla_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    svmla_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svmla_s8_z(pg, op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv8i16")]
+        fn _svmla_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t)
+            -> svint16_t;
+    }
+    unsafe { _svmla_s16_m(pg.sve_into(), op1, op2, op3) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svmla_s16_m(pg, op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    svmla_s16_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svmla_s16_x(pg, op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    svmla_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svmla_s16_z(pg, op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv4i32")]
+        fn _svmla_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t)
+            -> svint32_t;
+    }
+    unsafe { _svmla_s32_m(pg.sve_into(), op1, op2, op3) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svmla_s32_m(pg, op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    svmla_s32_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svmla_s32_x(pg, op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    svmla_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svmla_s32_z(pg, op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv2i64")]
+        fn _svmla_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t)
+            -> svint64_t;
+    }
+    unsafe { _svmla_s64_m(pg.sve_into(), op1, op2, op3) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svmla_s64_m(pg, op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    svmla_s64_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svmla_s64_x(pg, op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    svmla_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svmla_s64_z(pg, op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+// Unsigned variants delegate to the signed intrinsic via bit-casts; mla is
+// sign-agnostic for wrapping multiply-add.
+pub fn svmla_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    unsafe { svmla_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svmla_u8_m(pg, op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    svmla_u8_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svmla_u8_x(pg, op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    svmla_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svmla_u8_z(pg, op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+// Delegates to the signed s16 intrinsic via bit-casts (mla is sign-agnostic).
+pub fn svmla_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    unsafe { svmla_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svmla_u16_m(pg, op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    svmla_u16_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svmla_u16_x(pg, op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    svmla_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svmla_u16_z(pg, op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    unsafe { svmla_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svmla_u32_m(pg, op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    svmla_u32_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svmla_u32_x(pg, op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    svmla_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svmla_u32_z(pg, op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    unsafe { svmla_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svmla_u64_m(pg, op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    svmla_u64_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svmla_u64_x(pg, op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    svmla_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svmla_u64_z(pg, op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_f32])"]
+#[inline(always)]
+#[target_feature(enable =
"sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmla, IMM_INDEX = 0))]
+// Multiply-add using lane IMM_INDEX of op3 as the multiplier.
+// FIX: IMM_INDEX was referenced (static_assert_range!, the FFI call, and the
+// assert_instr attribute) but never declared; it must be a const generic
+// parameter, as for the other lane intrinsics in this crate.
+pub fn svmla_lane_f32<const IMM_INDEX: i32>(
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    // Four f32 lanes per 128-bit vector quadrant, so the lane index is 0..=3.
+    static_assert_range!(IMM_INDEX, 0..=3);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmla.lane.nxv4f32"
+        )]
+        fn _svmla_lane_f32(
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+            IMM_INDEX: i32,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svmla_lane_f32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_f64])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmla, IMM_INDEX = 0))]
+// FIX: same missing const generic as svmla_lane_f32 above.
+pub fn svmla_lane_f64<const IMM_INDEX: i32>(
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    // Two f64 lanes per 128-bit vector quadrant, so the lane index is 0..=1.
+    static_assert_range!(IMM_INDEX, 0..=1);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmla.lane.nxv2f64"
+        )]
+        fn _svmla_lane_f64(
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+            IMM_INDEX: i32,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svmla_lane_f64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmls))]
+// _m ("merging") form: inactive lanes keep the value of op1.
+pub fn svmls_f32_m(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmls.nxv4f32")]
+        fn _svmls_f32_m(
+            pg: svbool4_t,
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svmls_f32_m(pg.sve_into(), op1, op2, op3) }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmls_f32_m(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_f32_x(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svmls_f32_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmls_f32_x(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_f32_z(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svmls_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmls_f32_z(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_f64_m(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmls.nxv2f64")]
+        fn _svmls_f64_m(
+            pg: svbool2_t,
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svmls_f64_m(pg.sve_into(), op1, op2, op3) }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svmls_f64_m(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test,
assert_instr(fmls))] +pub fn svmls_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmls_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmls_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmls_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmls_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, 
op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv16i8")] + fn _svmls_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmls_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmls_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmls_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmls_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s8_z(pg: svbool_t, op1: svint8_t, 
op2: svint8_t, op3: svint8_t) -> svint8_t { + svmls_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmls_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv8i16")] + fn _svmls_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmls_s16_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmls_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmls_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmls_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmls_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmls_s16_z(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + 
unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv4i32")] + fn _svmls_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { _svmls_s32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmls_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmls_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmls_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s32_z(pg: svbool_t, 
op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmls_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmls_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv2i64")] + fn _svmls_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmls_s64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmls_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmls_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmls_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmls_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmls_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u8_m(pg: svbool_t, op1: svuint8_t, op2: 
svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmls_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmls_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmls_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmls_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmls_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3) +} +#[doc = 
"Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmls_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmls_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmls_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmls_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmls_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmls_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmls_u16_z(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmls_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmls_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmls_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmls_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmls_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmls_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmls_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmls_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmls_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmls_u64_x(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmls_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmls_u64_z(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls, IMM_INDEX = 0))] +pub fn svmls_lane_f32( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmls.lane.nxv4f32" + )] + fn _svmls_lane_f32( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + IMM_INDEX: i32, + ) -> svfloat32_t; + } + unsafe { _svmls_lane_f32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls, IMM_INDEX = 0))] +pub fn svmls_lane_f64( + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmls.lane.nxv2f64" + )] + fn _svmls_lane_f64( + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + IMM_INDEX: i32, + ) -> svfloat64_t; + } + unsafe { _svmls_lane_f64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f32mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmmla))] +pub fn svmmla_f32(op1: svfloat32_t, op2: svfloat32_t, op3: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmmla.nxv4f32")] + fn _svmmla_f32(op1: svfloat32_t, op2: svfloat32_t, op3: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmmla_f32(op1, op2, op3) } +} +#[doc = "Matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmmla))] +pub fn svmmla_f64(op1: svfloat64_t, op2: svfloat64_t, op3: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmmla.nxv2f64")] + fn _svmmla_f64(op1: svfloat64_t, op2: svfloat64_t, op3: svfloat64_t) -> 
svfloat64_t; + } + unsafe { _svmmla_f64(op1, op2, op3) } +} +#[doc = "Matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smmla))] +pub fn svmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smmla.nxv4i32")] + fn _svmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svmmla_s32(op1, op2, op3) } +} +#[doc = "Matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ummla))] +pub fn svmmla_u32(op1: svuint32_t, op2: svuint8_t, op3: svuint8_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ummla.nxv4i32")] + fn _svmmla_u32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svmmla_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmov[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svmov_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + svand_b_z(pg, op, op) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmsb.nxv4f32")] + fn _svmsb_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svmsb_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmsb_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmsb_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmsb_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmsb_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmsb_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmsb.nxv2f64")] + fn _svmsb_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmsb_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn 
svmsb_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmsb_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmsb_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmsb_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmsb_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, 
op3: f64) -> svfloat64_t { + svmsb_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv16i8")] + fn _svmsb_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmsb_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmsb_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmsb_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn 
svmsb_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmsb_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmsb_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmsb_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv8i16")] + fn _svmsb_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmsb_s16_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmsb_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmsb_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmsb_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmsb_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn 
svmsb_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmsb_s16_z(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv4i32")] + fn _svmsb_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { _svmsb_s32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmsb_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmsb_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmsb_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmsb_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmsb_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv2i64")] + fn _svmsb_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmsb_s64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmsb_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmsb_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmsb_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmsb_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmsb_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmsb_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmsb_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmsb_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmsb_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmsb_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmsb_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmsb_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u16_m(pg: 
svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmsb_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmsb_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmsb_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmsb_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmsb_u16_z(pg, op1, op2, 
svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmsb_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmsb_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmsb_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmsb_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmsb_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmsb_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmsb_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmsb_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmsb_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmsb_u64_x(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmsb_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmsb_u64_z(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_m)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv4f32")] + fn _svmul_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmul_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmul_f32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmul_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv2f64")] + fn _svmul_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmul_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_x(pg: 
svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmul_f64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmul_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv16i8")] + fn _svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmul_s8_m(pg, op1, op2) } +} +#[doc = 
"Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmul_s8_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmul_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> 
svint8_t { + svmul_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv8i16")] + fn _svmul_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmul_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmul_s16_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmul_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv4i32")] + fn _svmul_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmul_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_x)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmul_s32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmul_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.mul.nxv2i64")] + fn _svmul_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmul_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmul_s64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmul_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_z)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svmul_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmul_u8_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmul_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svmul_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn 
svmul_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmul_u16_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmul_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svmul_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmul_u32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmul_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svmul_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmul_u64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_z(pg: 
svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmul_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv16i8")] + fn _svmulh_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmulh_s8_m(pg, op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmulh_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> 
svint8_t { + svmulh_s8_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmulh_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmulh_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmulh_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv8i16")] + fn _svmulh_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { 
_svmulh_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmulh_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmulh_s16_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmulh_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmulh_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_z)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmulh_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv4i32")] + fn _svmulh_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmulh_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmulh_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmulh_s32_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_x)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmulh_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmulh_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmulh_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv2i64")] + fn _svmulh_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmulh_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmulh_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmulh_s64_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmulh_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmulh_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s64_z(pg: svbool_t, op1: svint64_t, op2: 
i64) -> svint64_t { + svmulh_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv16i8")] + fn _svmulh_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmulh_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmulh_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmulh_u8_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t 
{ + svmulh_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmulh_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmulh_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv8i16")] + fn _svmulh_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmulh_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u16_m(pg: svbool_t, op1: 
svuint16_t, op2: u16) -> svuint16_t { + svmulh_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmulh_u16_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmulh_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmulh_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmulh_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv4i32")] + fn _svmulh_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmulh_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmulh_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmulh_u32_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmulh_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply, returning 
high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmulh_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmulh_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv2i64")] + fn _svmulh_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmulh_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmulh_u64_m(pg, 
op1, svdup_n_u64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmulh_u64_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmulh_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmulh_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmulh_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_m)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmulx.nxv4f32")] + fn _svmulx_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmulx_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmulx_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmulx_f32_m(pg, op1, op2) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmulx_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmulx_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmulx_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmulx.nxv2f64")] + fn _svmulx_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmulx_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmulx_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_x)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmulx_f64_m(pg, op1, op2) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmulx_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmulx_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmulx_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Bitwise NAND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnand[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nand))] +pub fn svnand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> 
svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nand.z.nxv16i1")] + fn _svnand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svnand_b_z(pg, op1, op2) } +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fneg.nxv4f32")] + fn _svneg_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svneg_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svneg_f32_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svneg_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn 
svneg_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fneg.nxv2f64")] + fn _svneg_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svneg_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svneg_f64_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svneg_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv16i8")] + fn _svneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svneg_s8_m(inactive, pg, op) } +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svneg_s8_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svneg_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv8i16")] + fn _svneg_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svneg_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svneg_s16_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svneg_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv4i32")] + fn _svneg_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svneg_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svneg_s32_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svneg_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv2i64")] + fn _svneg_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svneg_s64_m(inactive, 
pg.sve_into(), op) } +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svneg_s64_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svneg_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmad.nxv4f32")] + fn _svnmad_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svnmad_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmad_f32_m(pg, 
op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmad_f32_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmad_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmad_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmad_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} 
+#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmad.nxv2f64")] + fn _svnmad_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svnmad_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmad_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmad_f64_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmad_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmad_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmad_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmla.nxv4f32")] + fn _svnmla_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svnmla_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmla_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmla_f32_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmla_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmla_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmla_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmla.nxv2f64")] + fn _svnmla_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svnmla_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmla_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f64_x( + pg: svbool_t, + op1: 
svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmla_f64_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmla_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmla_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmla_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, 
+) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmls.nxv4f32")] + fn _svnmls_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svnmls_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmls_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmls_f32_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmls_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmls_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmls_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmls.nxv2f64")] + fn _svnmls_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svnmls_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmls_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} 
+#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmls_f64_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmls_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmls_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmls_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, 
multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmsb.nxv4f32")] + fn _svnmsb_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svnmsb_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmsb_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmsb_f32_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmsb_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmsb_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmsb_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmsb.nxv2f64")] + fn _svnmsb_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svnmsb_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Negated multiply-subtract, multiplicand 
first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmsb_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmsb_f64_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmsb_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmsb_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmsb_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Bitwise NOR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnor[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nor))] +pub fn svnor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nor.z.nxv16i1")] + fn _svnor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svnor_b_z(pg, op1, op2) } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + sveor_b_z(pg, op, pg) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv16i8")] + fn _svnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { 
_svnot_s8_m(inactive, pg, op) } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svnot_s8_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svnot_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv8i16")] + fn _svnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svnot_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svnot_s16_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svnot_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv4i32")] + fn _svnot_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svnot_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svnot_s32_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svnot_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv2i64")] + fn _svnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svnot_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svnot_s64_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svnot_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(not))] +pub fn svnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svnot_u8_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svnot_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svnot_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svnot_u16_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svnot_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svnot_u32_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svnot_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svnot_u64_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svnot_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Bitwise inclusive OR, inverting second argument"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorn[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orn))] +pub fn svorn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orn.z.nxv16i1")] + fn _svorn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svorn_b_z(pg, op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.z.nxv16i1")] + fn _svorr_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svorr_b_z(pg, op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+ unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv16i8")] + fn _svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svorr_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svorr_s8_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svorr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv8i16")] + fn _svorr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svorr_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svorr_s16_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svorr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv4i32")] + fn _svorr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svorr_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svorr_s32_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svorr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn 
svorr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv2i64")] + fn _svorr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svorr_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svorr_s64_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + 
svorr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svorr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svorr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_x)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svorr_u8_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svorr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svorr_s16_m(pg, op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svorr_u16_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svorr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svorr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svorr_u32_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svorr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svorr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svorr_u64_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svorr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv16i8")] + fn _svorv_s8(pg: svbool_t, 
op: svint8_t) -> i8; + } + unsafe { _svorv_s8(pg, op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv8i16")] + fn _svorv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svorv_s16(pg.sve_into(), op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv4i32")] + fn _svorv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svorv_s32(pg.sve_into(), op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv2i64")] + fn _svorv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svorv_s64(pg.sve_into(), op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svorv_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svorv_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svorv_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svorv_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Set all predicate elements to false"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpfalse[_b])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(pfalse))] +pub fn svpfalse_b() -> svbool_t { + svdupq_n_b8( + false, false, false, false, false, false, false, false, false, false, false, false, false, + false, false, false, + ) +} +#[doc = "Set the first active predicate element to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpfirst[_b])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pfirst))] +pub fn svpfirst_b(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pfirst.nxv16i1")] + fn _svpfirst_b(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svpfirst_b(pg, op) } +} +#[doc = "Find next active predicate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b8(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv16i1")] + fn _svpnext_b8(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svpnext_b8(pg, op) } +} +#[doc = "Find next active predicate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b16(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv8i1")] + fn _svpnext_b16(pg: svbool8_t, op: svbool8_t) -> svbool8_t; + } + unsafe { _svpnext_b16(pg.sve_into(), 
op.sve_into()).sve_into() } +} +#[doc = "Find next active predicate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b32(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv4i1")] + fn _svpnext_b32(pg: svbool4_t, op: svbool4_t) -> svbool4_t; + } + unsafe { _svpnext_b32(pg.sve_into(), op.sve_into()).sve_into() } +} +#[doc = "Find next active predicate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b64(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv2i1")] + fn _svpnext_b64(pg: svbool2_t, op: svbool2_t) -> svbool2_t; + } + unsafe { _svpnext_b64(pg.sve_into(), op.sve_into()).sve_into() } +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb(pg: svbool_t, base: *const T) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv16i1")] + fn _svprfb(pg: 
svbool_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfb(pg, base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh(pg: svbool_t, base: *const T) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv8i1")] + fn _svprfh(pg: svbool8_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfh(pg.sve_into(), base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw(pg: svbool_t, base: *const T) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv4i1")] + fn _svprfw(pg: svbool4_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfw(pg.sve_into(), base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd)"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd(pg: svbool_t, base: *const T) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv2i1")] + fn _svprfd(pg: svbool2_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfd(pg.sve_into(), base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[s32]offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_s32offset( + pg: svbool_t, + base: *const T, + offsets: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.sxtw.index.nxv4i32" + )] + fn _svprfb_gather_s32offset( + pg: svbool4_t, + base: *const crate::ffi::c_void, + offsets: svint32_t, + op: svprfop, + ); + } + _svprfb_gather_s32offset( + pg.sve_into(), + base as *const crate::ffi::c_void, + offsets, + OP, + ) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[s32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each 
active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_s32index( + pg: svbool_t, + base: *const T, + indices: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.sxtw.index.nxv4i32" + )] + fn _svprfh_gather_s32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfh_gather_s32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[s32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_s32index( + pg: svbool_t, + base: *const T, + indices: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.sxtw.index.nxv4i32" + )] + fn _svprfw_gather_s32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfw_gather_s32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[s32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must 
be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_s32index( + pg: svbool_t, + base: *const T, + indices: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.sxtw.index.nxv4i32" + )] + fn _svprfd_gather_s32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfd_gather_s32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[s64]offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_s64offset( + pg: svbool_t, + base: *const T, + offsets: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.index.nxv2i64" + )] + fn _svprfb_gather_s64offset( + pg: svbool2_t, + base: *const crate::ffi::c_void, + offsets: svint64_t, + op: svprfop, + ); + } + _svprfb_gather_s64offset( + pg.sve_into(), + base as *const crate::ffi::c_void, + offsets, + OP, + ) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[s64]index)"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_s64index( + pg: svbool_t, + base: *const T, + indices: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.index.nxv2i64" + )] + fn _svprfh_gather_s64index( + pg: svbool2_t, + base: *const crate::ffi::c_void, + indices: svint64_t, + op: svprfop, + ); + } + _svprfh_gather_s64index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[s64]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_s64index( + pg: svbool_t, + base: *const T, + indices: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.index.nxv2i64" + )] + fn _svprfw_gather_s64index( + pg: svbool2_t, + base: *const crate::ffi::c_void, + indices: svint64_t, + op: svprfop, + ); + } + _svprfw_gather_s64index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[s64]index)"] 
+#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_s64index( + pg: svbool_t, + base: *const T, + indices: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.index.nxv2i64" + )] + fn _svprfd_gather_s64index( + pg: svbool2_t, + base: *const crate::ffi::c_void, + indices: svint64_t, + op: svprfop, + ); + } + _svprfd_gather_s64index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[u32]offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_u32offset( + pg: svbool_t, + base: *const T, + offsets: svuint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.uxtw.index.nxv4i32" + )] + fn _svprfb_gather_u32offset( + pg: svbool4_t, + base: *const crate::ffi::c_void, + offsets: svint32_t, + op: svprfop, + ); + } + _svprfb_gather_u32offset( + pg.sve_into(), + base as *const crate::ffi::c_void, + offsets.as_signed(), + OP, + ) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[u32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_u32index( + pg: svbool_t, + base: *const T, + indices: svuint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.uxtw.index.nxv4i32" + )] + fn _svprfh_gather_u32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfh_gather_u32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices.as_signed(), + OP, + ) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[u32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_u32index( + pg: svbool_t, + base: *const T, + indices: svuint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.uxtw.index.nxv4i32" + )] + fn _svprfw_gather_u32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfw_gather_u32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + 
indices.as_signed(), + OP, + ) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[u32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_u32index( + pg: svbool_t, + base: *const T, + indices: svuint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.uxtw.index.nxv4i32" + )] + fn _svprfd_gather_u32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfd_gather_u32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices.as_signed(), + OP, + ) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[u64]offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_u64offset( + pg: svbool_t, + base: *const T, + offsets: svuint64_t, +) { + svprfb_gather_s64offset::(pg, base, offsets.as_signed()) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[u64]index)"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_u64index( + pg: svbool_t, + base: *const T, + indices: svuint64_t, +) { + svprfh_gather_s64index::(pg, base, indices.as_signed()) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[u64]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_u64index( + pg: svbool_t, + base: *const T, + indices: svuint64_t, +) { + svprfw_gather_s64index::(pg, base, indices.as_signed()) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[u64]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_u64index( + pg: svbool_t, + base: *const T, + indices: svuint64_t, +) { + svprfd_gather_s64index::(pg, base, indices.as_signed()) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u32base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv4i32" + )] + fn _svprfb_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfb_gather_u32base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u32base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv4i32" + )] + fn 
_svprfh_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfh_gather_u32base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u32base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv4i32" + )] + fn _svprfw_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfw_gather_u32base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u32base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn 
svprfd_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv4i32" + )] + fn _svprfd_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfd_gather_u32base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u64base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv2i64" + )] + fn _svprfb_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfb_gather_u64base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u64base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv2i64" + )] + fn _svprfh_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfh_gather_u64base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u64base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv2i64" + )] + fn _svprfw_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfw_gather_u64base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u64base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " 
* Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv2i64" + )] + fn _svprfd_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfd_gather_u64base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u32base]_offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u32base_offset( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv4i32" + )] + fn _svprfb_gather_u32base_offset(pg: svbool4_t, bases: svint32_t, offset: i64, op: svprfop); + } + _svprfb_gather_u32base_offset(pg.sve_into(), bases.as_signed(), offset, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u32base]_index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u32base_index( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv4i32" + )] + fn _svprfh_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfh_gather_u32base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(1), OP) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u32base]_index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u32base_index( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = 
"aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv4i32" + )] + fn _svprfw_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfw_gather_u32base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(2), OP) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u32base]_index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u32base_index( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv4i32" + )] + fn _svprfd_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfd_gather_u32base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(3), OP) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u64base]_offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u64base_offset( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv2i64" + )] + fn _svprfb_gather_u64base_offset(pg: svbool2_t, bases: svint64_t, offset: i64, op: svprfop); + } + _svprfb_gather_u64base_offset(pg.sve_into(), bases.as_signed(), offset, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u64base]_index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u64base_index( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv2i64" + )] + fn _svprfh_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfh_gather_u64base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(1), OP) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u64base]_index)"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u64base_index( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv2i64" + )] + fn _svprfw_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfw_gather_u64base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(2), OP) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u64base]_index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u64base_index( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv2i64" + )] + fn _svprfd_gather_u64base_index(pg: svbool2_t, bases: 
svint64_t, index: i64, op: svprfop); + } + _svprfd_gather_u64base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(3), OP) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_vnum)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_vnum(pg: svbool_t, base: *const T, vnum: i64) { + svprfb::(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_vnum)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_vnum(pg: svbool_t, base: *const T, vnum: i64) { + svprfh::(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_vnum)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_vnum(pg: svbool_t, base: *const T, vnum: i64) { + svprfw::(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_vnum)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_vnum(pg: svbool_t, base: *const T, vnum: i64) { + svprfd::(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Test whether any active element is true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_any)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptest))] +pub fn svptest_any(pg: svbool_t, op: svbool_t) -> bool { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ptest.any.nxv16i1" + )] + fn _svptest_any(pg: svbool_t, op: svbool_t) -> bool; + } + unsafe { _svptest_any(pg, op) } +} +#[doc = "Test whether first active element is true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_first)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptest))] +pub fn svptest_first(pg: svbool_t, op: svbool_t) -> bool { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ptest.first.nxv16i1" + )] + fn _svptest_first(pg: svbool_t, op: svbool_t) -> bool; + } + unsafe { _svptest_first(pg, op) } +} +#[doc = "Test whether last active element is true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_last)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ptest))] +pub fn svptest_last(pg: svbool_t, op: svbool_t) -> bool { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ptest.last.nxv16i1" + )] + fn _svptest_last(pg: svbool_t, op: svbool_t) -> bool; + } + unsafe { _svptest_last(pg, op) } +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b8() -> svbool_t { + svptrue_pat_b8::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b16() -> svbool_t { + svptrue_pat_b16::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b32() -> svbool_t { + svptrue_pat_b32::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b64() -> svbool_t { + svptrue_pat_b64::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b8() -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv16i1")] + fn _svptrue_pat_b8(pattern: svpattern) -> svbool_t; + } + unsafe { _svptrue_pat_b8(PATTERN) } +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b16() -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv8i1")] + fn _svptrue_pat_b16(pattern: svpattern) -> svbool8_t; + } + unsafe { _svptrue_pat_b16(PATTERN).sve_into() } +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b32() -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv4i1")] + fn _svptrue_pat_b32(pattern: svpattern) -> svbool4_t; + } + unsafe { _svptrue_pat_b32(PATTERN).sve_into() } +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b64)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b64() -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv2i1")] + fn _svptrue_pat_b64(pattern: svpattern) -> svbool2_t; + } + unsafe { _svptrue_pat_b64(PATTERN).sve_into() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv16i8" + )] + fn _svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_s8(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv8i16" + )] + fn _svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + 
unsafe { _svqadd_s16(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv4i32" + )] + fn _svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_s32(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv2i64" + )] + fn _svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + 
} + unsafe { _svqadd_s64(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv16i8" + )] + fn _svqadd_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv8i16" + )] + fn _svqadd_u16(op1: 
svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqadd_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv4i32" + )] + fn _svqadd_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv2i64" + )] + fn _svqadd_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_s32(op: i32) -> i32 { + svqdecb_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_s32(op: i32) -> i32 { + svqdech_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_s32(op: i32) -> i32 { + svqdecw_pat_n_s32::<{ svpattern::SV_ALL }, 
IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_s32(op: i32) -> i32 { + svqdecd_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_s64(op: i64) -> i64 { + svqdecb_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_s64(op: i64) -> i64 { + svqdech_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_s64(op: i64) -> i64 { + svqdecw_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_s64(op: i64) -> i64 { + svqdecd_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_u32(op: u32) -> u32 { + svqdecb_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_u32(op: u32) -> u32 { + svqdech_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_u32(op: u32) -> u32 { + svqdecw_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_u32(op: u32) -> u32 { + svqdecd_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_u64(op: u64) -> u64 { + svqdecb_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_u64(op: u64) -> u64 { + svqdech_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_u64(op: u64) -> u64 { + svqdecw_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_u64(op: u64) -> 
u64 { + svqdecd_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecb.n32")] + fn _svqdecb_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecb_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.n32")] + fn _svqdech_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdech_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_s32(op: i32) -> i32 
{ + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.n32")] + fn _svqdecw_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecw_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.n32")] + fn _svqdecd_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecd_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecb.n64")] + fn _svqdecb_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecb_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_s64])"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.n64")] + fn _svqdech_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdech_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.n64")] + fn _svqdecw_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecw_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.n64")] + fn _svqdecd_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { 
_svqdecd_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecb.n32")] + fn _svqdecb_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecb_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.n32")] + fn _svqdech_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdech_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn 
svqdecw_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.n32")] + fn _svqdecw_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecw_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.n32")] + fn _svqdecd_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecd_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecb.n64")] + fn _svqdecb_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecb_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.n64")] + fn _svqdech_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdech_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.n64")] + fn _svqdecw_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecw_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.n64")] + fn _svqdecd_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecd_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_s16( + op: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.nxv8i16")] + fn _svqdech_pat_s16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqdech_pat_s16(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_s32( + op: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.nxv4i32")] + fn _svqdecw_pat_s32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqdecw_pat_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_s64])"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_s64( + op: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.nxv2i64")] + fn _svqdecd_pat_s64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqdecd_pat_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_u16( + op: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.nxv8i16")] + fn _svqdech_pat_u16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqdech_pat_u16(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_u32( + op: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.nxv4i32")] + fn 
_svqdecw_pat_u32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqdecw_pat_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_u64( + op: svuint64_t, +) -> svuint64_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.nxv2i64")] + fn _svqdecd_pat_u64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqdecd_pat_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] +pub fn svqdech_s16(op: svint16_t) -> svint16_t { + svqdech_pat_s16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_s32(op: svint32_t) -> svint32_t { + svqdecw_pat_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_s64(op: svint64_t) -> svint64_t { + svqdecd_pat_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] +pub fn svqdech_u16(op: svuint16_t) -> svuint16_t { + svqdech_pat_u16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_u32(op: svuint32_t) -> svuint32_t { + svqdecw_pat_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_u64(op: svuint64_t) -> svuint64_t { + svqdecd_pat_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b8)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b8(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv16i1" + )] + fn _svqdecp_n_s32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqdecp_n_s32_b8(op, pg) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b16(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv8i1" + )] + fn _svqdecp_n_s32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqdecp_n_s32_b16(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b32(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv4i1" + )] + fn _svqdecp_n_s32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqdecp_n_s32_b32(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b64(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv2i1" + )] + fn _svqdecp_n_s32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqdecp_n_s32_b64(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b8(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv16i1" + )] + fn _svqdecp_n_s64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqdecp_n_s64_b8(op, pg) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b16(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv8i1" + )] + fn _svqdecp_n_s64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqdecp_n_s64_b16(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b32(op: i64, pg: 
svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv4i1" + )] + fn _svqdecp_n_s64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqdecp_n_s64_b32(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b64(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv2i1" + )] + fn _svqdecp_n_s64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqdecp_n_s64_b64(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b8(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv16i1" + )] + fn _svqdecp_n_u32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqdecp_n_u32_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b16(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv8i1" + )] + fn _svqdecp_n_u32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqdecp_n_u32_b16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b32(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv4i1" + )] + fn _svqdecp_n_u32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqdecp_n_u32_b32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b64(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv2i1" + )] + fn _svqdecp_n_u32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqdecp_n_u32_b64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b8(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv16i1" + )] + fn _svqdecp_n_u64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqdecp_n_u64_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b16(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv8i1" + )] + fn _svqdecp_n_u64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqdecp_n_u64_b16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b32(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv4i1" + )] + fn _svqdecp_n_u64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqdecp_n_u64_b32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b64(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = 
"aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv2i1" + )] + fn _svqdecp_n_u64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqdecp_n_u64_b64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_s16(op: svint16_t, pg: svbool_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv8i16")] + fn _svqdecp_s16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqdecp_s16(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_s32(op: svint32_t, pg: svbool_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv4i32")] + fn _svqdecp_s32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqdecp_s32(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_s64(op: svint64_t, pg: svbool_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv2i64")] + fn _svqdecp_s64(op: 
svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqdecp_s64(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_u16(op: svuint16_t, pg: svbool_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv8i16")] + fn _svqdecp_u16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqdecp_u16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_u32(op: svuint32_t, pg: svbool_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv4i32")] + fn _svqdecp_u32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqdecp_u32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_u64(op: svuint64_t, pg: svbool_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv2i64")] + fn _svqdecp_u64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { 
_svqdecp_u64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_s32(op: i32) -> i32 { + svqincb_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_s32(op: i32) -> i32 { + svqinch_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_s32(op: i32) -> i32 { + svqincw_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_s32(op: i32) -> i32 { + svqincd_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_s64(op: i64) -> i64 { + svqincb_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_s64(op: i64) -> i64 { + svqinch_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_s64(op: i64) -> i64 { + svqincw_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_s64(op: i64) -> i64 { + svqincd_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_u32(op: u32) -> u32 { + svqincb_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_u32(op: u32) -> u32 { + svqinch_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_u32(op: u32) -> u32 { + svqincw_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_u32(op: u32) -> u32 { + svqincd_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_u64(op: u64) -> 
u64 { + svqincb_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_u64(op: u64) -> u64 { + svqinch_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_u64(op: u64) -> u64 { + svqincw_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_u64(op: u64) -> u64 { + svqincd_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincb_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.sve.sqincb.n32")] + fn _svqincb_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincb_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.n32")] + fn _svqinch_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqinch_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.n32")] + fn _svqincw_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincw_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# 
[cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.n32")] + fn _svqincd_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincd_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincb_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincb.n64")] + fn _svqincb_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincb_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.n64")] + fn _svqinch_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqinch_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.n64")] + fn _svqincw_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincw_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.n64")] + fn _svqincd_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincd_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincb_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.uqincb.n32")] + fn _svqincb_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincb_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.n32")] + fn _svqinch_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqinch_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.n32")] + fn _svqincw_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincw_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.n32")] + fn _svqincd_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincd_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincb_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincb.n64")] + fn _svqincb_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincb_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.n64")] + fn _svqinch_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { 
_svqinch_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.n64")] + fn _svqincw_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincw_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.n64")] + fn _svqincd_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincd_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , 
IMM_FACTOR = 1))] +pub fn svqinch_pat_s16( + op: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.nxv8i16")] + fn _svqinch_pat_s16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqinch_pat_s16(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_s32( + op: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.nxv4i32")] + fn _svqincw_pat_s32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqincw_pat_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_s64( + op: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.nxv2i64")] + fn _svqincd_pat_s64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqincd_pat_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of halfword elements"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_u16( + op: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.nxv8i16")] + fn _svqinch_pat_u16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqinch_pat_u16(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_u32( + op: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.nxv4i32")] + fn _svqincw_pat_u32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqincw_pat_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_u64( + op: svuint64_t, +) -> svuint64_t { + 
static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.nxv2i64")] + fn _svqincd_pat_u64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqincd_pat_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))] +pub fn svqinch_s16(op: svint16_t) -> svint16_t { + svqinch_pat_s16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))] +pub fn svqincw_s32(op: svint32_t) -> svint32_t { + svqincw_pat_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))] +pub fn svqincd_s64(op: svint64_t) -> svint64_t { + svqincd_pat_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] +pub fn svqinch_u16(op: svuint16_t) -> svuint16_t { + svqinch_pat_u16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))] +pub fn svqincw_u32(op: svuint32_t) -> svuint32_t { + svqincw_pat_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] +pub fn svqincd_u64(op: svuint64_t) -> svuint64_t { + svqincd_pat_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b8(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv16i1" + )] + fn _svqincp_n_s32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqincp_n_s32_b8(op, pg) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b16)"] +#[inline(always)] +#[target_feature(enable 
= "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b16(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv8i1" + )] + fn _svqincp_n_s32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqincp_n_s32_b16(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b32(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv4i1" + )] + fn _svqincp_n_s32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqincp_n_s32_b32(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b64(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv2i1" + )] + fn _svqincp_n_s32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqincp_n_s32_b64(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b8(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv16i1" + )] + fn _svqincp_n_s64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqincp_n_s64_b8(op, pg) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b16(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv8i1" + )] + fn _svqincp_n_s64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqincp_n_s64_b16(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b32(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv4i1" + )] + fn _svqincp_n_s64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqincp_n_s64_b32(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b64(op: i64, pg: svbool_t) -> i64 
{ + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv2i1" + )] + fn _svqincp_n_s64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqincp_n_s64_b64(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b8(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv16i1" + )] + fn _svqincp_n_u32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqincp_n_u32_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b16(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv8i1" + )] + fn _svqincp_n_u32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqincp_n_u32_b16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b32(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + 
#[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv4i1" + )] + fn _svqincp_n_u32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqincp_n_u32_b32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b64(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv2i1" + )] + fn _svqincp_n_u32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqincp_n_u32_b64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b8(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv16i1" + )] + fn _svqincp_n_u64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqincp_n_u64_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b16(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv8i1" + )] + fn _svqincp_n_u64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqincp_n_u64_b16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b32(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv4i1" + )] + fn _svqincp_n_u64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqincp_n_u64_b32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b64(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv2i1" + )] + fn _svqincp_n_u64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqincp_n_u64_b64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_s16(op: svint16_t, pg: svbool_t) -> svint16_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv8i16")] + fn _svqincp_s16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqincp_s16(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_s32(op: svint32_t, pg: svbool_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv4i32")] + fn _svqincp_s32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqincp_s32(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_s64(op: svint64_t, pg: svbool_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv2i64")] + fn _svqincp_s64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqincp_s64(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_u16(op: svuint16_t, pg: svbool_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv8i16")] + fn _svqincp_u16(op: 
svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqincp_u16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_u32(op: svuint32_t, pg: svbool_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv4i32")] + fn _svqincp_u32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqincp_u32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_u64(op: svuint64_t, pg: svbool_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv2i64")] + fn _svqincp_u64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqincp_u64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv16i8" + )] + fn _svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { 
_svqsub_s8(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqsub_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv8i16" + )] + fn _svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_s16(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv4i32" + )] + fn _svqsub_s32(op1: svint32_t, op2: svint32_t) -> 
svint32_t; + } + unsafe { _svqsub_s32(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv2i64" + )] + fn _svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_s64(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv16i8" + )] + fn _svqsub_u8(op1: 
svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsub_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv8i16" + )] + fn _svqsub_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv4i32" + )] + fn _svqsub_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv2i64" + )] + fn _svqsub_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn 
svrbit_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv16i8")] + fn _svrbit_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svrbit_s8_m(inactive, pg, op) } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svrbit_s8_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svrbit_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv8i16")] + fn _svrbit_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svrbit_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svrbit_s16_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svrbit_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv4i32")] + fn _svrbit_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrbit_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svrbit_s32_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svrbit_s32_m(svdup_n_s32(0), 
pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv2i64")] + fn _svrbit_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrbit_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrbit_s64_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrbit_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svrbit_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svrbit_u8_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svrbit_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svrbit_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrbit_u16_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + 
svrbit_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svrbit_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrbit_u32_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrbit_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrbit_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrbit_u64_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrbit_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Read FFR, returning predicate of succesfully loaded elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrdffr)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rdffr))] +pub fn svrdffr() -> svbool_t { + svrdffr_z(svptrue_b8()) +} +#[doc = "Read FFR, returning predicate of succesfully loaded elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrdffr_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rdffr))] +pub fn svrdffr_z(pg: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rdffr.z")] + fn _svrdffr_z(pg: svbool_t) -> svbool_t; + } + unsafe { _svrdffr_z(pg) } +} +#[doc = "Reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpe))] +pub fn svrecpe_f32(op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpe.x.nxv4f32" + )] + fn _svrecpe_f32(op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrecpe_f32(op) } +} +#[doc = "Reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpe))] +pub fn svrecpe_f64(op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpe.x.nxv2f64" + )] + fn _svrecpe_f64(op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrecpe_f64(op) } +} +#[doc = "Reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecps))] +pub fn svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecps.x.nxv4f32" + )] + fn _svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrecps_f32(op1, op2) } +} +#[doc = "Reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecps))] +pub fn svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecps.x.nxv2f64" + )] + fn _svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrecps_f64(op1, op2) } +} +#[doc = "Reciprocal 
exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpx.x.nxv4f32" + )] + fn _svrecpx_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrecpx_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrecpx_f32_m(op, pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrecpx_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.frecpx.x.nxv2f64" + )] + fn _svrecpx_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrecpx_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrecpx_f64_m(op, pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrecpx_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_f32(op: svfloat32_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_f64(op: svfloat64_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_s8(op: svint8_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_s16(op: svint16_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_s32(op: svint32_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_s64(op: svint64_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_u8(op: svuint8_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector 
contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_u16(op: svuint16_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_u32(op: svuint32_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_u64(op: svuint64_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_f32(op: svfloat32_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_f64(op: svfloat64_t) -> svfloat64_t { + unsafe { 
crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_s8(op: svint8_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_s16(op: svint16_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_s32(op: svint32_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_s64(op: svint64_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_u8(op: svuint8_t) -> 
svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_u16(op: svuint16_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_u32(op: svuint32_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_u64(op: svuint64_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_f32(op: svfloat32_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn 
svreinterpret_s8_f64(op: svfloat64_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_s8(op: svint8_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_s16(op: svint16_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_s32(op: svint32_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_s64(op: svint64_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub 
fn svreinterpret_s8_u8(op: svuint8_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_u16(op: svuint16_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_u32(op: svuint32_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_u64(op: svuint64_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_f32(op: svfloat32_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +pub fn svreinterpret_s16_f64(op: svfloat64_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_s8(op: svint8_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_s16(op: svint16_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_s32(op: svint32_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_s64(op: svint64_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_u8(op: svuint8_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_u16(op: svuint16_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_u32(op: svuint32_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_u64(op: svuint64_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_f32(op: svfloat32_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_f64])"] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_f64(op: svfloat64_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_s8(op: svint8_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_s16(op: svint16_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_s32(op: svint32_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_s64(op: svint64_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u8])"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_u8(op: svuint8_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_u16(op: svuint16_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_u32(op: svuint32_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_u64(op: svuint64_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_f32(op: svfloat32_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_f64(op: svfloat64_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_s8(op: svint8_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_s16(op: svint16_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_s32(op: svint32_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_s64(op: svint64_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector 
contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_u8(op: svuint8_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_u16(op: svuint16_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_u32(op: svuint32_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_u64(op: svuint64_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_f32(op: svfloat32_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc 
= "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_f64(op: svfloat64_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_s8(op: svint8_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_s16(op: svint16_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_s32(op: svint32_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_s64(op: svint64_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } 
+} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_u8(op: svuint8_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_u16(op: svuint16_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_u32(op: svuint32_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_u64(op: svuint64_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_f32(op: svfloat32_t) -> svuint16_t { + unsafe { 
crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_f64(op: svfloat64_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_s8(op: svint8_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_s16(op: svint16_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_s32(op: svint32_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_s64(op: svint64_t) -> 
svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_u8(op: svuint8_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_u16(op: svuint16_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_u32(op: svuint32_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_u64(op: svuint64_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn 
svreinterpret_u32_f32(op: svfloat32_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_f64(op: svfloat64_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_s8(op: svint8_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_s16(op: svint16_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_s32(op: svint32_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +pub fn svreinterpret_u32_s64(op: svint64_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_u8(op: svuint8_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_u16(op: svuint16_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_u32(op: svuint32_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_u64(op: svuint64_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_f32(op: svfloat32_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_f64(op: svfloat64_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_s8(op: svint8_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_s16(op: svint16_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_s32(op: svint32_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s64])"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_s64(op: svint64_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_u8(op: svuint8_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_u16(op: svuint16_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_u32(op: svuint32_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_u64(op: svuint64_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b8)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b8(op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv16i1")] + fn _svrev_b8(op: svbool_t) -> svbool_t; + } + unsafe { _svrev_b8(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b16(op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv8i1")] + fn _svrev_b16(op: svbool8_t) -> svbool8_t; + } + unsafe { _svrev_b16(op.sve_into()).sve_into() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b32(op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i1")] + fn _svrev_b32(op: svbool4_t) -> svbool4_t; + } + unsafe { _svrev_b32(op.sve_into()).sve_into() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b64(op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i1")] + fn _svrev_b64(op: 
svbool2_t) -> svbool2_t; + } + unsafe { _svrev_b64(op.sve_into()).sve_into() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_f32(op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4f32")] + fn _svrev_f32(op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrev_f32(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_f64(op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2f64")] + fn _svrev_f64(op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrev_f64(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s8(op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv16i8")] + fn _svrev_s8(op: svint8_t) -> svint8_t; + } + unsafe { _svrev_s8(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s16(op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv8i16")] + fn _svrev_s16(op: svint16_t) -> svint16_t; + } + unsafe { _svrev_s16(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s32(op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i32")] + fn _svrev_s32(op: svint32_t) -> svint32_t; + } + unsafe { _svrev_s32(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s64(op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i64")] + fn _svrev_s64(op: svint64_t) -> svint64_t; + } + unsafe { _svrev_s64(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u8(op: svuint8_t) -> svuint8_t { + unsafe { svrev_s8(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u16(op: svuint16_t) -> svuint16_t { + unsafe { svrev_s16(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u32(op: svuint32_t) -> svuint32_t { + unsafe { svrev_s32(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u64(op: svuint64_t) -> svuint64_t { + unsafe { svrev_s64(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv8i16")] + fn _svrevb_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svrevb_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn 
svrevb_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svrevb_s16_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svrevb_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv4i32")] + fn _svrevb_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrevb_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevb_s32_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + 
svrevb_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv2i64")] + fn _svrevb_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrevb_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevb_s64_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevb_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svrevb_s16_m(inactive.as_signed(), pg, 
op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrevb_u16_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrevb_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svrevb_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevb_u32_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevb_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrevb_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevb_u64_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevb_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revh.nxv4i32")] + fn _svrevh_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrevh_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevh_s32_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevh_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revh.nxv2i64")] + fn _svrevh_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrevh_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevh_s64_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevh_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svrevh_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevh_u32_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevh_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrevh_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevh_u64_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevh_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revw.nxv2i64")] + fn _svrevw_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrevw_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse words within elements"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevw_s64_m(op, pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevw_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrevw_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse words within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevw_u64_m(op, pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(revw))] +pub fn svrevw_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevw_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinta.nxv4f32")] + fn _svrinta_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrinta_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinta_f32_m(op, pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinta_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(frinta))] +pub fn svrinta_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinta.nxv2f64")] + fn _svrinta_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrinta_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinta_f64_m(op, pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinta_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinti.nxv4f32")] + fn _svrinti_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrinti_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round using current rounding 
mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinti_f32_m(op, pg, op) +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinti_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinti.nxv2f64")] + fn _svrinti_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrinti_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinti_f64_m(op, pg, op) +} +#[doc = "Round 
using current rounding mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinti_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintm.nxv4f32")] + fn _svrintm_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintm_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintm_f32_m(op, pg, op) +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintm_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintm.nxv2f64")] + fn _svrintm_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintm_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintm_f64_m(op, pg, op) +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintm_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintn.nxv4f32")] + fn 
_svrintn_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintn_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintn_f32_m(op, pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintn_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintn.nxv2f64")] + fn _svrintn_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintn_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(frintn))] +pub fn svrintn_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintn_f64_m(op, pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintn_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintp.nxv4f32")] + fn _svrintp_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintp_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintp_f32_m(op, pg, op) +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f32_z(pg: svbool_t, op: svfloat32_t) 
-> svfloat32_t { + svrintp_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintp.nxv2f64")] + fn _svrintp_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintp_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintp_f64_m(op, pg, op) +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintp_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe 
extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintx.nxv4f32")] + fn _svrintx_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintx_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintx_f32_m(op, pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintx_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintx.nxv2f64")] + fn _svrintx_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintx_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintx_f64_m(op, pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintx_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round towards zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintz.nxv4f32")] + fn _svrintz_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintz_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintz_f32_m(op, pg, op) +} +#[doc = "Round towards zero"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintz_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round towards zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintz.nxv2f64")] + fn _svrintz_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintz_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintz_f64_m(op, pg, op) +} +#[doc = "Round towards zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintz_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Reciprocal square root estimate"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frsqrte))] +pub fn svrsqrte_f32(op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrte.x.nxv4f32" + )] + fn _svrsqrte_f32(op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrsqrte_f32(op) } +} +#[doc = "Reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frsqrte))] +pub fn svrsqrte_f64(op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrte.x.nxv2f64" + )] + fn _svrsqrte_f64(op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrsqrte_f64(op) } +} +#[doc = "Reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frsqrts))] +pub fn svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrts.x.nxv4f32" + )] + fn _svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrsqrts_f32(op1, op2) } +} +#[doc = "Reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frsqrts))] +pub fn svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrts.x.nxv2f64" + )] + fn _svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrsqrts_f64(op1, op2) } +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fscale.nxv4f32")] + fn _svscale_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t; + } + unsafe { _svscale_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t { + svscale_f32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t { + svscale_f32_m(pg, op1, op2) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t { + svscale_f32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t { + svscale_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t { + svscale_f32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fscale.nxv2f64")] + fn _svscale_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t; + } + unsafe { _svscale_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t { + svscale_f64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t { + svscale_f64_m(pg, op1, op2) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t { + svscale_f64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t { + svscale_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(fscale))] +pub fn svscale_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t { + svscale_f64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_b])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_b(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe { simd_select(pg, op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe { simd_select::(pg.sve_into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe { simd_select::(pg.sve_into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe { simd_select::(pg, op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe { simd_select::(pg.sve_into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe { simd_select::(pg.sve_into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe { simd_select::(pg.sve_into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { simd_select::(pg, op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { simd_select::(pg.sve_into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { simd_select::(pg.sve_into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { simd_select::(pg.sve_into(), op1, op2) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_f32(tuple: svfloat32x2_t, x: svfloat32_t) -> svfloat32x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_f64(tuple: svfloat64x2_t, x: svfloat64_t) -> svfloat64x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { 
crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_s8(tuple: svint8x2_t, x: svint8_t) -> svint8x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_s16(tuple: svint16x2_t, x: svint16_t) -> svint16x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_s32(tuple: svint32x2_t, x: svint32_t) -> svint32x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_s64(tuple: svint64x2_t, x: svint64_t) -> svint64x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { 
crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_u8(tuple: svuint8x2_t, x: svuint8_t) -> svuint8x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_u16(tuple: svuint16x2_t, x: svuint16_t) -> svuint16x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_u32(tuple: svuint32x2_t, x: svuint32_t) -> svuint32x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_u64(tuple: svuint64x2_t, x: svuint64_t) -> svuint64x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + 
unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_f32(tuple: svfloat32x3_t, x: svfloat32_t) -> svfloat32x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_f64(tuple: svfloat64x3_t, x: svfloat64_t) -> svfloat64x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_s8(tuple: svint8x3_t, x: svint8_t) -> svint8x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_s16(tuple: svint16x3_t, x: svint16_t) -> svint16x3_t { + 
static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_s32(tuple: svint32x3_t, x: svint32_t) -> svint32x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_s64(tuple: svint64x3_t, x: svint64_t) -> svint64x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_u8(tuple: svuint8x3_t, x: svuint8_t) -> svuint8x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_u16(tuple: svuint16x3_t, x: svuint16_t) -> 
svuint16x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_u32(tuple: svuint32x3_t, x: svuint32_t) -> svuint32x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_u64(tuple: svuint64x3_t, x: svuint64_t) -> svuint64x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_f32(tuple: svfloat32x4_t, x: svfloat32_t) -> svfloat32x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_f64(tuple: 
svfloat64x4_t, x: svfloat64_t) -> svfloat64x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_s8(tuple: svint8x4_t, x: svint8_t) -> svint8x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_s16(tuple: svint16x4_t, x: svint16_t) -> svint16x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_s32(tuple: svint32x4_t, x: svint32_t) -> svint32x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn 
svset4_s64<const IMM_INDEX: i32>(tuple: svint64x4_t, x: svint64_t) -> svint64x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_u8<const IMM_INDEX: i32>(tuple: svuint8x4_t, x: svuint8_t) -> svuint8x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_u16<const IMM_INDEX: i32>(tuple: svuint16x4_t, x: svuint16_t) -> svuint16x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_u32<const IMM_INDEX: i32>(tuple: svuint32x4_t, x: svuint32_t) -> svuint32x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+pub fn svset4_u64<const IMM_INDEX: i32>(tuple: svuint64x4_t, x: svuint64_t) -> svuint64x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Initialize the first-fault register to all-true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsetffr)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(setffr))] +pub fn svsetffr() { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.setffr")] + fn _svsetffr(); + } + unsafe { _svsetffr() } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv4f32")] + fn _svsplice_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsplice_f32(pg.sve_into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv2f64")] + fn _svsplice_f64(pg: svbool2_t, op1: svfloat64_t, op2: 
svfloat64_t) -> svfloat64_t; + } + unsafe { _svsplice_f64(pg.sve_into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv16i8")] + fn _svsplice_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsplice_s8(pg, op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv8i16")] + fn _svsplice_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsplice_s16(pg.sve_into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv4i32")] + fn _svsplice_s32(pg: svbool4_t, op1: svint32_t, op2: 
svint32_t) -> svint32_t; + } + unsafe { _svsplice_s32(pg.sve_into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv2i64")] + fn _svsplice_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsplice_s64(pg.sve_into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svsplice_s8(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svsplice_s16(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svsplice_s32(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svsplice_s64(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Square root"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsqrt.nxv4f32")] + fn _svsqrt_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsqrt_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Square root"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svsqrt_f32_m(op, pg, op) +} +#[doc = "Square root"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svsqrt_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Square root"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsqrt.nxv2f64")] + fn _svsqrt_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsqrt_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Square root"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svsqrt_f64_m(op, pg, op) +} +#[doc = "Square root"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svsqrt_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active 
element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4f32")] + fn _svst1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + } + _svst1_f32(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2f64")] + fn _svst1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64); + } + _svst1_f64(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv16i8")] + fn _svst1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svst1_s8(data, pg, base) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i16")] + fn _svst1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16); + } + _svst1_s16(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i32")] + fn _svst1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32); + } + _svst1_s32(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i64")] + fn _svst1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64); + } + _svst1_s64(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) { + svst1_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u16])"] +#[doc = "## Safety"] +#[doc = " 
* [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) { + svst1_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) { + svst1_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) { + svst1_s64(pg, base.as_signed(), 
data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32index_f32( + pg: svbool_t, + base: *mut f32, + indices: svint32_t, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4f32" + )] + fn _svst1_scatter_s32index_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + indices: svint32_t, + ); + } + _svst1_scatter_s32index_f32(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32index_s32( + pg: svbool_t, + base: *mut i32, + indices: svint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i32" + )] + fn 
_svst1_scatter_s32index_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + indices: svint32_t, + ); + } + _svst1_scatter_s32index_s32(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32index_u32( + pg: svbool_t, + base: *mut u32, + indices: svint32_t, + data: svuint32_t, +) { + svst1_scatter_s32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svint64_t, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2f64" + )] + fn _svst1_scatter_s64index_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + indices: 
svint64_t, + ); + } + _svst1_scatter_s64index_f64(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i64" + )] + fn _svst1_scatter_s64index_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + indices: svint64_t, + ); + } + _svst1_scatter_s64index_s64(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svint64_t, + data: svuint64_t, +) { + svst1_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = 
"Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32index_f32( + pg: svbool_t, + base: *mut f32, + indices: svuint32_t, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4f32" + )] + fn _svst1_scatter_u32index_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + indices: svint32_t, + ); + } + _svst1_scatter_u32index_f32(data, pg.sve_into(), base, indices.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32index_s32( + pg: svbool_t, + base: *mut i32, + indices: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i32" + )] + fn _svst1_scatter_u32index_s32( 
+ data: svint32_t, + pg: svbool4_t, + base: *mut i32, + indices: svint32_t, + ); + } + _svst1_scatter_u32index_s32(data, pg.sve_into(), base, indices.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32index_u32( + pg: svbool_t, + base: *mut u32, + indices: svuint32_t, + data: svuint32_t, +) { + svst1_scatter_u32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svuint64_t, + data: svfloat64_t, +) { + svst1_scatter_s64index_f64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_s64])"] +#[doc = "## Safety"] 
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svuint64_t, + data: svint64_t, +) { + svst1_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svuint64_t, + data: svuint64_t, +) { + svst1_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32offset_f32( + pg: svbool_t, + base: *mut f32, + offsets: svint32_t, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4f32" + )] + fn _svst1_scatter_s32offset_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + offsets: svint32_t, + ); + } + _svst1_scatter_s32offset_f32(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32offset_s32( + pg: svbool_t, + base: *mut i32, + offsets: svint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i32" + )] + fn _svst1_scatter_s32offset_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + offsets: svint32_t, + ); + } + _svst1_scatter_s32offset_s32(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32offset_u32( + pg: svbool_t, + base: *mut u32, + offsets: svint32_t, + data: svuint32_t, +) { + svst1_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svint64_t, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2f64" + )] + fn _svst1_scatter_s64offset_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + offsets: svint64_t, + ); + } + _svst1_scatter_s64offset_f64(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i64" + )] + fn _svst1_scatter_s64offset_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + offsets: svint64_t, + ); + } + _svst1_scatter_s64offset_s64(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svint64_t, + data: svuint64_t, +) { + svst1_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32offset_f32( + pg: svbool_t, + base: *mut f32, + offsets: svuint32_t, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4f32" + )] + fn _svst1_scatter_u32offset_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + offsets: svint32_t, + ); + } + _svst1_scatter_u32offset_f32(data, pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i32, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i32" + )] + fn _svst1_scatter_u32offset_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + offsets: svint32_t, + ); + } + _svst1_scatter_u32offset_s32(data, pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u32, + offsets: svuint32_t, + data: svuint32_t, +) { + svst1_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svuint64_t, + data: svfloat64_t, +) { + svst1_scatter_s64offset_f64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svuint64_t, + data: svint64_t, +) { + 
svst1_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_f32(pg: svbool_t, bases: svuint32_t, data: svfloat32_t) { + svst1_scatter_u32base_offset_f32(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svst1_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svst1_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_f64(pg: svbool_t, bases: svuint64_t, data: svfloat64_t) { + svst1_scatter_u64base_offset_f64(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svfloat32_t, +) { + svst1_scatter_u32base_offset_f32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + svst1_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svst1_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating 
store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svfloat64_t, +) { + svst1_scatter_u64base_offset_f64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svst1_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(3), data) +} 
+#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svst1_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svst1_scatter_u32base_offset_f32( + data: svfloat32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1_scatter_u32base_offset_f32(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svst1_scatter_u32base_offset_s32( + data: svint32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1_scatter_u32base_offset_s32(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svst1_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svst1_scatter_u64base_offset_f64( + data: svfloat64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1_scatter_u64base_offset_f64(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svst1_scatter_u64base_offset_s64( + data: svint64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1_scatter_u64base_offset_s64(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32_t) { + svst1_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64_t) { + svst1_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8_t) { + svst1_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16_t) { + svst1_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_s32(pg: svbool_t, 
base: *mut i32, vnum: i64, data: svint32_t) { + svst1_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64_t) { + svst1_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8_t) { + svst1_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " 
* This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16_t) { + svst1_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32_t) { + svst1_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64_t) { + svst1_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and 
store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_s16(pg: svbool_t, base: *mut i8, data: svint16_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i8")] + fn _svst1b_s16(data: nxv8i8, pg: svbool8_t, ptr: *mut i8); + } + _svst1b_s16( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_s32(pg: svbool_t, base: *mut i8, data: svint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i8")] + fn _svst1b_s32(data: nxv4i8, pg: svbool4_t, ptr: *mut i8); + } + _svst1b_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_s32(pg: svbool_t, base: *mut i16, data: svint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i16")] + fn _svst1h_s32(data: nxv4i16, pg: svbool4_t, ptr: *mut i16); + } + _svst1h_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_s64(pg: svbool_t, base: *mut i8, data: svint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i8")] + fn _svst1b_s64(data: nxv2i8, pg: svbool2_t, ptr: *mut i8); + } + _svst1b_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_s64(pg: svbool_t, base: *mut i16, data: svint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i16")] + fn _svst1h_s64(data: nxv2i16, pg: svbool2_t, ptr: *mut i16); + } + _svst1h_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_s64(pg: svbool_t, base: *mut i32, data: svint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i32")] + fn _svst1w_s64(data: nxv2i32, pg: svbool2_t, ptr: *mut i32); + } + _svst1w_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_u16(pg: svbool_t, base: *mut u8, data: svuint16_t) { + svst1b_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_u32(pg: svbool_t, base: *mut u8, data: svuint32_t) { + svst1b_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_u32(pg: svbool_t, base: *mut u16, data: svuint32_t) { + svst1h_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_u64(pg: svbool_t, base: *mut u8, data: svuint64_t) { + svst1b_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_u64(pg: svbool_t, base: *mut u16, data: svuint64_t) { + svst1h_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] 
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_u64(pg: svbool_t, base: *mut u32, data: svuint64_t) { + svst1w_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s32offset_s32( + pg: svbool_t, + base: *mut i8, + offsets: svint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i8" + )] + fn _svst1b_scatter_s32offset_s32( + data: nxv4i8, + pg: svbool4_t, + base: *mut i8, + offsets: svint32_t, + ); + } + _svst1b_scatter_s32offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by 
`pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32offset_s32( + pg: svbool_t, + base: *mut i16, + offsets: svint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i16" + )] + fn _svst1h_scatter_s32offset_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + offsets: svint32_t, + ); + } + _svst1h_scatter_s32offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s32offset_u32( + pg: svbool_t, + base: *mut u8, + offsets: svint32_t, + data: svuint32_t, +) { + svst1b_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32offset_u32( + pg: svbool_t, + base: *mut u16, + offsets: svint32_t, + data: svuint32_t, +) { + svst1h_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i8" + )] + fn _svst1b_scatter_s64offset_s64( + data: nxv2i8, + pg: svbool2_t, + base: *mut i8, + offsets: svint64_t, + ); + } + _svst1b_scatter_s64offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i16" + )] + fn _svst1h_scatter_s64offset_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + offsets: svint64_t, + ); + } + _svst1h_scatter_s64offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i32" + )] + fn _svst1w_scatter_s64offset_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + offsets: svint64_t, + ); + } + _svst1w_scatter_s64offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints 
must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svint64_t, + data: svuint64_t, +) { + svst1b_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i8, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i8" + )] + fn _svst1b_scatter_u32offset_s32( + data: nxv4i8, + pg: svbool4_t, + base: *mut i8, + offsets: svint32_t, + ); + } + _svst1b_scatter_u32offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets.as_signed(), + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i16, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i16" + )] + fn _svst1h_scatter_u32offset_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + offsets: svint32_t, + ); + } + _svst1h_scatter_u32offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets.as_signed(), + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u8, + offsets: svuint32_t, + data: svuint32_t, +) { + svst1b_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u16, + offsets: svuint32_t, + data: svuint32_t, +) { + svst1h_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svuint64_t, + data: svint64_t, +) { + svst1b_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svuint64_t, + data: svint64_t, +) { + svst1h_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svuint64_t, + data: svint64_t, +) { + svst1w_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1b_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This 
dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svst1b_scatter_u32base_offset_s32( + data: nxv4i8, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1b_scatter_u32base_offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svst1h_scatter_u32base_offset_s32( + data: nxv4i16, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1h_scatter_u32base_offset_s32( + crate::intrinsics::simd::simd_cast(data), + 
pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base]_offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svst1b_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + 
data: svuint32_t, +) { + svst1h_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svst1b_scatter_u64base_offset_s64( + data: nxv2i8, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1b_scatter_u64base_offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is 
similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svst1h_scatter_u64base_offset_s64( + data: nxv2i16, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1h_scatter_u64base_offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svst1w_scatter_u64base_offset_s64( + data: nxv2i32, + pg: svbool2_t, + bases: svint64_t, 
+ offset: i64, + ); + } + _svst1w_scatter_u64base_offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1b_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1h_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1w_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svst1b_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svst1h_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn 
svst1b_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svst1b_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svst1h_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: 
svint64_t) { + svst1b_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1h_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1w_scatter_u64base_offset_s64(pg, bases, 0, data) +} 
+#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1b_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1h_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1w_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_s16(pg: svbool_t, base: *mut i8, vnum: i64, data: svint16_t) { + svst1b_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_s32(pg: svbool_t, base: *mut i8, vnum: i64, data: svint32_t) { + svst1b_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_s32(pg: svbool_t, base: *mut i16, vnum: i64, data: svint32_t) { + svst1h_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_s64(pg: svbool_t, base: *mut i8, vnum: i64, data: svint64_t) { + svst1b_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_s64(pg: svbool_t, base: *mut i16, vnum: i64, data: svint64_t) { + svst1h_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_vnum_s64(pg: svbool_t, base: *mut i32, vnum: i64, data: svint64_t) { + svst1w_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_u16(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint16_t) { + svst1b_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_u32(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint32_t) { + svst1b_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_u32(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint32_t) { + svst1h_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_u64(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint64_t) { + svst1b_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_u64(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint64_t) { + svst1h_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_vnum_u64(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint64_t) { + svst1w_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32index_s32( + pg: svbool_t, + base: *mut i16, + indices: svint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i16" + )] + fn _svst1h_scatter_s32index_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + indices: svint32_t, + ); + } + _svst1h_scatter_s32index_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + indices, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32index_u32( + pg: svbool_t, + base: *mut u16, + indices: svint32_t, + data: svuint32_t, +) { + svst1h_scatter_s32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64index_s64( + pg: svbool_t, + base: *mut i16, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i16" + )] + fn _svst1h_scatter_s64index_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + indices: svint64_t, + ); + } + _svst1h_scatter_s64index_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + indices, + ) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i32" + )] + fn _svst1w_scatter_s64index_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + indices: svint64_t, + ); + } + _svst1w_scatter_s64index_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + indices, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each 
active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64index_u64( + pg: svbool_t, + base: *mut u32, + indices: svint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn 
svst1h_scatter_u32index_s32( + pg: svbool_t, + base: *mut i16, + indices: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i16" + )] + fn _svst1h_scatter_u32index_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + indices: svint32_t, + ); + } + _svst1h_scatter_u32index_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + indices.as_signed(), + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32index_u32( + pg: svbool_t, + base: *mut u16, + indices: svuint32_t, + data: svuint32_t, +) { + svst1h_scatter_u32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn 
svst1h_scatter_u64index_s64( + pg: svbool_t, + base: *mut i16, + indices: svuint64_t, + data: svint64_t, +) { + svst1h_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svuint64_t, + data: svint64_t, +) { + svst1w_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svuint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64index_u64( + pg: svbool_t, + base: *mut u32, + indices: svuint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + svst1h_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_index[_u32])"] 
+#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svst1h_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svst1h_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svst1w_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svst1h_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(1), data) +} +#[doc = 
"Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svst1w_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_f32(pg: svbool_t, base: *mut f32, data: svfloat32x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv4f32")] + fn _svst2_f32(data0: svfloat32_t, data1: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + } + _svst2_f32( + svget2_f32::<0>(data), + svget2_f32::<1>(data), + 
pg.sve_into(), + base, + ) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_f64(pg: svbool_t, base: *mut f64, data: svfloat64x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv2f64")] + fn _svst2_f64(data0: svfloat64_t, data1: svfloat64_t, pg: svbool2_t, ptr: *mut f64); + } + _svst2_f64( + svget2_f64::<0>(data), + svget2_f64::<1>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_s8(pg: svbool_t, base: *mut i8, data: svint8x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv16i8")] + fn _svst2_s8(data0: svint8_t, data1: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svst2_s8(svget2_s8::<0>(data), svget2_s8::<1>(data), pg, base) +} +#[doc = 
"Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_s16(pg: svbool_t, base: *mut i16, data: svint16x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv8i16")] + fn _svst2_s16(data0: svint16_t, data1: svint16_t, pg: svbool8_t, ptr: *mut i16); + } + _svst2_s16( + svget2_s16::<0>(data), + svget2_s16::<1>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_s32(pg: svbool_t, base: *mut i32, data: svint32x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv4i32")] + fn _svst2_s32(data0: svint32_t, data1: svint32_t, pg: svbool4_t, ptr: *mut i32); + } + _svst2_s32( + svget2_s32::<0>(data), + svget2_s32::<1>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store 
two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_s64(pg: svbool_t, base: *mut i64, data: svint64x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv2i64")] + fn _svst2_s64(data0: svint64_t, data1: svint64_t, pg: svbool2_t, ptr: *mut i64); + } + _svst2_s64( + svget2_s64::<0>(data), + svget2_s64::<1>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_u8(pg: svbool_t, base: *mut u8, data: svuint8x2_t) { + svst2_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must 
be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_u16(pg: svbool_t, base: *mut u16, data: svuint16x2_t) { + svst2_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_u32(pg: svbool_t, base: *mut u32, data: svuint32x2_t) { + svst2_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_u64(pg: svbool_t, base: *mut u64, data: svuint64x2_t) { + svst2_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = 
"Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x2_t) { + svst2_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x2_t) { + svst2_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x2_t) { + svst2_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x2_t) { + svst2_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x2_t) { + svst2_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into 
two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x2_t) { + svst2_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x2_t) { + svst2_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element 
(governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x2_t) { + svst2_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x2_t) { + svst2_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x2_t) { + svst2_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_f32(pg: svbool_t, base: *mut f32, data: svfloat32x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv4f32")] + fn _svst3_f32( + data0: svfloat32_t, + data1: svfloat32_t, + data2: svfloat32_t, + pg: svbool4_t, + ptr: *mut f32, + ); + } + _svst3_f32( + svget3_f32::<0>(data), + svget3_f32::<1>(data), + svget3_f32::<2>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_f64(pg: svbool_t, base: *mut f64, data: svfloat64x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv2f64")] + fn _svst3_f64( + data0: svfloat64_t, + data1: svfloat64_t, + data2: svfloat64_t, + pg: svbool2_t, + ptr: *mut f64, + ); + } + _svst3_f64( + svget3_f64::<0>(data), + 
svget3_f64::<1>(data), + svget3_f64::<2>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_s8(pg: svbool_t, base: *mut i8, data: svint8x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv16i8")] + fn _svst3_s8(data0: svint8_t, data1: svint8_t, data2: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svst3_s8( + svget3_s8::<0>(data), + svget3_s8::<1>(data), + svget3_s8::<2>(data), + pg, + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_s16(pg: svbool_t, base: *mut i16, data: svint16x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv8i16")] + fn _svst3_s16( + data0: svint16_t, + data1: svint16_t, + data2: svint16_t, + pg: 
svbool8_t, + ptr: *mut i16, + ); + } + _svst3_s16( + svget3_s16::<0>(data), + svget3_s16::<1>(data), + svget3_s16::<2>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_s32(pg: svbool_t, base: *mut i32, data: svint32x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv4i32")] + fn _svst3_s32( + data0: svint32_t, + data1: svint32_t, + data2: svint32_t, + pg: svbool4_t, + ptr: *mut i32, + ); + } + _svst3_s32( + svget3_s32::<0>(data), + svget3_s32::<1>(data), + svget3_s32::<2>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_s64(pg: svbool_t, base: *mut i64, data: svint64x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.st3.nxv2i64")] + fn _svst3_s64( + data0: svint64_t, + data1: svint64_t, + data2: svint64_t, + pg: svbool2_t, + ptr: *mut i64, + ); + } + _svst3_s64( + svget3_s64::<0>(data), + svget3_s64::<1>(data), + svget3_s64::<2>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_u8(pg: svbool_t, base: *mut u8, data: svuint8x3_t) { + svst3_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_u16(pg: svbool_t, base: *mut u16, data: svuint16x3_t) { + svst3_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u32])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_u32(pg: svbool_t, base: *mut u32, data: svuint32x3_t) { + svst3_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_u64(pg: svbool_t, base: *mut u64, data: svuint64x3_t) { + svst3_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x3_t) { + svst3_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x3_t) { + svst3_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x3_t) { + svst3_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x3_t) { + svst3_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x3_t) { + svst3_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x3_t) { + svst3_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x3_t) { + svst3_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x3_t) { + svst3_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x3_t) { + svst3_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x3_t) { + svst3_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_f32(pg: svbool_t, base: *mut f32, data: svfloat32x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv4f32")] + fn _svst4_f32( + data0: svfloat32_t, + data1: svfloat32_t, + data2: svfloat32_t, + data3: svfloat32_t, + pg: svbool4_t, + ptr: *mut f32, + ); + } + _svst4_f32( + svget4_f32::<0>(data), + svget4_f32::<1>(data), + svget4_f32::<2>(data), + svget4_f32::<3>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_f64(pg: svbool_t, base: *mut f64, data: svfloat64x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv2f64")] + fn _svst4_f64( + data0: svfloat64_t, + data1: svfloat64_t, + data2: svfloat64_t, + data3: svfloat64_t, + pg: svbool2_t, + ptr: *mut f64, + ); + } + _svst4_f64( + svget4_f64::<0>(data), + svget4_f64::<1>(data), + svget4_f64::<2>(data), + svget4_f64::<3>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active 
element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_s8(pg: svbool_t, base: *mut i8, data: svint8x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv16i8")] + fn _svst4_s8( + data0: svint8_t, + data1: svint8_t, + data2: svint8_t, + data3: svint8_t, + pg: svbool_t, + ptr: *mut i8, + ); + } + _svst4_s8( + svget4_s8::<0>(data), + svget4_s8::<1>(data), + svget4_s8::<2>(data), + svget4_s8::<3>(data), + pg, + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_s16(pg: svbool_t, base: *mut i16, data: svint16x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv8i16")] + fn _svst4_s16( + data0: svint16_t, + data1: svint16_t, + data2: svint16_t, + data3: svint16_t, + pg: svbool8_t, + ptr: *mut i16, + ); + } + _svst4_s16( + svget4_s16::<0>(data), + svget4_s16::<1>(data), + svget4_s16::<2>(data), + svget4_s16::<3>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_s32(pg: svbool_t, base: *mut i32, data: svint32x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv4i32")] + fn _svst4_s32( + data0: svint32_t, + data1: svint32_t, + data2: svint32_t, + data3: svint32_t, + pg: svbool4_t, + ptr: *mut i32, + ); + } + _svst4_s32( + svget4_s32::<0>(data), + svget4_s32::<1>(data), + svget4_s32::<2>(data), + svget4_s32::<3>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_s64(pg: svbool_t, base: *mut i64, data: svint64x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv2i64")] + fn _svst4_s64( + data0: svint64_t, + data1: svint64_t, + data2: svint64_t, + data3: svint64_t, + pg: svbool2_t, + ptr: *mut i64, + ); + } + _svst4_s64( + 
svget4_s64::<0>(data), + svget4_s64::<1>(data), + svget4_s64::<2>(data), + svget4_s64::<3>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_u8(pg: svbool_t, base: *mut u8, data: svuint8x4_t) { + svst4_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_u16(pg: svbool_t, base: *mut u16, data: svuint16x4_t) { + svst4_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = 
" * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_u32(pg: svbool_t, base: *mut u32, data: svuint32x4_t) { + svst4_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_u64(pg: svbool_t, base: *mut u64, data: svuint64x4_t) { + svst4_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x4_t) { + svst4_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x4_t) { + svst4_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x4_t) { + svst4_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x4_t) { + svst4_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x4_t) { + svst4_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x4_t) { + svst4_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x4_t) { + svst4_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x4_t) { + svst4_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x4_t) { + svst4_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x4_t) { + svst4_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers 
may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv4f32")] + fn _svstnt1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + } + _svstnt1_f32(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv2f64")] + fn _svstnt1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64); + } + _svstnt1_f64(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s8])"] +#[doc = "## Safety"] 
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv16i8")] + fn _svstnt1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svstnt1_s8(data, pg, base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", 
link_name = "llvm.aarch64.sve.stnt1.nxv8i16")] + fn _svstnt1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16); + } + _svstnt1_s16(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv4i32")] + fn _svstnt1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32); + } + _svstnt1_s32(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv2i64")] + fn _svstnt1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64); + } + _svstnt1_s64(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) { + svstnt1_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the 
calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) { + svstnt1_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) { + svstnt1_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) { + svstnt1_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32_t) { + svstnt1_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element 
(governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64_t) { + svstnt1_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8_t) { + svstnt1_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16_t) { + svstnt1_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32_t) { + svstnt1_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64_t) { + svstnt1_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8_t) { + svstnt1_u8(pg, base.offset(svcntb() as isize * vnum 
as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16_t) { + svstnt1_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_vnum_u32(pg: svbool_t, base: *mut 
u32, vnum: i64, data: svuint32_t) { + svstnt1_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64_t) { + svstnt1_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv4f32")] + fn _svsub_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsub_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsub_f32_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsub_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv2f64")] + fn _svsub_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsub_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsub_f64_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_z)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsub_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv16i8")] + fn _svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsub_s8_m(pg, op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsub_s8_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv8i16")] + fn _svsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { 
_svsub_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsub_s16_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(sub))] +pub fn svsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv4i32")] + fn _svsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsub_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsub_s32_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_x(pg, op1, 
svdup_n_s32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv2i64")] + fn _svsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsub_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsub_s64_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> 
svuint8_t { + unsafe { svsub_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsub_u8_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svsub_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsub_u16_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svsub_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn 
svsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsub_u32_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svsub_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsub_u64_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract reversed"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsubr.nxv4f32")] + fn _svsubr_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsubr_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsubr_f32_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsubr_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsubr.nxv2f64")] + fn _svsubr_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsubr_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsubr_f64_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsubr_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(subr))] +pub fn svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv16i8")] + fn _svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsubr_s8_m(pg, op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsubr_s8_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsubr_s8_m(pg, 
svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv8i16")] + fn _svsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsubr_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsubr_s16_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv4i32")] + fn _svsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsubr_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsubr_s32_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn 
svsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv2i64")] + fn _svsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsubr_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsubr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsubr_s64_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + 
svsubr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsubr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svsubr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_x)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsubr_u8_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svsubr_s16_m(pg, op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsubr_u16_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svsubr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsubr_u32_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract reversed"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svsubr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsubr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsubr_u64_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsubr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsubr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Dot product (signed × unsigned)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sudot, IMM_INDEX = 0))] +pub fn svsudot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svuint8_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + 
unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sudot.lane.nxv4i32" + )] + fn _svsudot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_index: i32, + ) -> svint32_t; + } + unsafe { _svsudot_lane_s32(op1, op2, op3.as_signed(), IMM_INDEX) } +} +#[doc = "Dot product (signed × unsigned)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usdot))] +pub fn svsudot_s32(op1: svint32_t, op2: svint8_t, op3: svuint8_t) -> svint32_t { + svusdot_s32(op1, op3, op2) +} +#[doc = "Dot product (signed × unsigned)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usdot))] +pub fn svsudot_n_s32(op1: svint32_t, op2: svint8_t, op3: u8) -> svint32_t { + svsudot_s32(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_f32(data: svfloat32_t, indices: svuint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv4f32")] + fn _svtbl_f32(data: svfloat32_t, indices: svint32_t) -> svfloat32_t; + } + unsafe { _svtbl_f32(data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_f64])"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_f64(data: svfloat64_t, indices: svuint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv2f64")] + fn _svtbl_f64(data: svfloat64_t, indices: svint64_t) -> svfloat64_t; + } + unsafe { _svtbl_f64(data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_s8(data: svint8_t, indices: svuint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv16i8")] + fn _svtbl_s8(data: svint8_t, indices: svint8_t) -> svint8_t; + } + unsafe { _svtbl_s8(data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_s16(data: svint16_t, indices: svuint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv8i16")] + fn _svtbl_s16(data: svint16_t, indices: svint16_t) -> svint16_t; + } + unsafe { _svtbl_s16(data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_s32(data: svint32_t, indices: svuint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv4i32")] + fn _svtbl_s32(data: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svtbl_s32(data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_s64(data: svint64_t, indices: svuint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv2i64")] + fn _svtbl_s64(data: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svtbl_s64(data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_u8(data: svuint8_t, indices: svuint8_t) -> svuint8_t { + unsafe { svtbl_s8(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_u16(data: svuint16_t, indices: svuint16_t) -> svuint16_t { + unsafe { svtbl_s16(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_u32(data: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svtbl_s32(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_u64(data: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svtbl_s64(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Trigonometric multiply-add coefficient"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtmad[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ftmad, IMM3 = 0))] +pub fn svtmad_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + static_assert_range!(IMM3, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftmad.x.nxv4f32" + )] + fn _svtmad_f32(op1: svfloat32_t, op2: svfloat32_t, imm3: i32) -> svfloat32_t; + } + unsafe { _svtmad_f32(op1, op2, IMM3) } +} +#[doc = "Trigonometric multiply-add coefficient"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtmad[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ftmad, IMM3 = 0))] +pub fn svtmad_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + static_assert_range!(IMM3, 0..=7); + 
unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftmad.x.nxv2f64" + )] + fn _svtmad_f64(op1: svfloat64_t, op2: svfloat64_t, imm3: i32) -> svfloat64_t; + } + unsafe { _svtmad_f64(op1, op2, IMM3) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv16i1")] + fn _svtrn1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svtrn1_b8(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv8i1")] + fn _svtrn1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svtrn1_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4i1")] + fn 
_svtrn1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svtrn1_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2i1")] + fn _svtrn1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svtrn1_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4f32")] + fn _svtrn1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svtrn1_f32(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2f64")] + fn _svtrn1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { 
_svtrn1_f64(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv16i8")] + fn _svtrn1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svtrn1_s8(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv8i16")] + fn _svtrn1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn1_s16(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4i32")] + fn _svtrn1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svtrn1_s32(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2i64")] + fn _svtrn1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svtrn1_s64(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even elements from two inputs"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svtrn1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv4f32")] + fn _svtrn1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svtrn1q_f32(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv2f64")] + fn _svtrn1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svtrn1q_f64(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv16i8")] + fn _svtrn1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svtrn1q_s8(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv8i16")] + fn _svtrn1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn1q_s16(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv4i32")] + fn _svtrn1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svtrn1q_s32(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { 
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv2i64")] + fn _svtrn1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svtrn1q_s64(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u64(op1: svuint64_t, 
op2: svuint64_t) -> svuint64_t { + unsafe { svtrn1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv16i1")] + fn _svtrn2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svtrn2_b8(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv8i1")] + fn _svtrn2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svtrn2_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4i1")] + fn _svtrn2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svtrn2_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = 
"Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2i1")] + fn _svtrn2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svtrn2_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4f32")] + fn _svtrn2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svtrn2_f32(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2f64")] + fn _svtrn2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svtrn2_f64(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv16i8")] + fn _svtrn2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svtrn2_s8(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv8i16")] + fn _svtrn2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn2_s16(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4i32")] + fn _svtrn2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svtrn2_s32(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2i64")] + fn _svtrn2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svtrn2_s64(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svtrn2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv4f32")] + fn _svtrn2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svtrn2q_f32(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv2f64")] + fn _svtrn2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svtrn2q_f64(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.trn2q.nxv16i8")] + fn _svtrn2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svtrn2q_s8(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv8i16")] + fn _svtrn2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn2q_s16(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv4i32")] + fn _svtrn2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svtrn2q_s32(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv2i64")] + fn _svtrn2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svtrn2q_s64(op1, 
op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svtrn2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Trigonometric starting value"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtsmul[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ftsmul))] +pub fn svtsmul_f32(op1: svfloat32_t, op2: svuint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftsmul.x.nxv4f32" + )] + fn _svtsmul_f32(op1: svfloat32_t, op2: svint32_t) -> svfloat32_t; + } + unsafe { _svtsmul_f32(op1, op2.as_signed()) } +} +#[doc = "Trigonometric starting value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtsmul[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ftsmul))] +pub fn svtsmul_f64(op1: svfloat64_t, op2: svuint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftsmul.x.nxv2f64" + )] + fn _svtsmul_f64(op1: svfloat64_t, op2: svint64_t) -> svfloat64_t; + } + unsafe { _svtsmul_f64(op1, op2.as_signed()) } +} +#[doc = "Trigonometric select coefficient"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtssel[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ftssel))] +pub fn svtssel_f32(op1: svfloat32_t, op2: svuint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftssel.x.nxv4f32" + )] + fn _svtssel_f32(op1: svfloat32_t, op2: svint32_t) -> svfloat32_t; + } + unsafe { _svtssel_f32(op1, op2.as_signed()) } +} +#[doc = "Trigonometric select coefficient"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtssel[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ftssel))] +pub fn svtssel_f64(op1: svfloat64_t, op2: svuint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftssel.x.nxv2f64" + )] + fn _svtssel_f64(op1: svfloat64_t, op2: svint64_t) -> svfloat64_t; + } + unsafe { _svtssel_f64(op1, op2.as_signed()) } +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_f32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_f32() -> svfloat32x2_t { + svcreate2_f32(svdup_n_f32(0f32), svdup_n_f32(0f32)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_f64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_f64() -> svfloat64x2_t { + svcreate2_f64(svdup_n_f64(0f64), svdup_n_f64(0f64)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_s8() -> svint8x2_t { + svcreate2_s8(svdup_n_s8(0), svdup_n_s8(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_s16() -> svint16x2_t { + svcreate2_s16(svdup_n_s16(0), svdup_n_s16(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_s32() -> svint32x2_t { + svcreate2_s32(svdup_n_s32(0), svdup_n_s32(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_s64() -> svint64x2_t { + svcreate2_s64(svdup_n_s64(0), svdup_n_s64(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like 
[`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_u8() -> svuint8x2_t { + svcreate2_u8(svdup_n_u8(0), svdup_n_u8(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_u16() -> svuint16x2_t { + svcreate2_u16(svdup_n_u16(0), svdup_n_u16(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_u32() -> svuint32x2_t { + svcreate2_u32(svdup_n_u32(0), svdup_n_u32(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_u64() -> svuint64x2_t { + svcreate2_u64(svdup_n_u64(0), svdup_n_u64(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_f32)"] +#[doc = "## 
Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_f32() -> svfloat32x3_t { + svcreate3_f32(svdup_n_f32(0f32), svdup_n_f32(0f32), svdup_n_f32(0f32)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_f64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_f64() -> svfloat64x3_t { + svcreate3_f64(svdup_n_f64(0f64), svdup_n_f64(0f64), svdup_n_f64(0f64)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_s8() -> svint8x3_t { + svcreate3_s8(svdup_n_s8(0), svdup_n_s8(0), svdup_n_s8(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_s16() -> svint16x3_t { + svcreate3_s16(svdup_n_s16(0), svdup_n_s16(0), svdup_n_s16(0)) +} +#[doc = "Create an 
uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_s32() -> svint32x3_t { + svcreate3_s32(svdup_n_s32(0), svdup_n_s32(0), svdup_n_s32(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_s64() -> svint64x3_t { + svcreate3_s64(svdup_n_s64(0), svdup_n_s64(0), svdup_n_s64(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_u8() -> svuint8x3_t { + svcreate3_u8(svdup_n_u8(0), svdup_n_u8(0), svdup_n_u8(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +pub unsafe fn svundef3_u16() -> svuint16x3_t { + svcreate3_u16(svdup_n_u16(0), svdup_n_u16(0), svdup_n_u16(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_u32() -> svuint32x3_t { + svcreate3_u32(svdup_n_u32(0), svdup_n_u32(0), svdup_n_u32(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_u64() -> svuint64x3_t { + svcreate3_u64(svdup_n_u64(0), svdup_n_u64(0), svdup_n_u64(0)) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_f32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_f32() -> svfloat32x4_t { + svcreate4_f32( + svdup_n_f32(0f32), + svdup_n_f32(0f32), + svdup_n_f32(0f32), + svdup_n_f32(0f32), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_f64)"] +#[doc = "## Safety"] +#[doc = " * This 
creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_f64() -> svfloat64x4_t { + svcreate4_f64( + svdup_n_f64(0f64), + svdup_n_f64(0f64), + svdup_n_f64(0f64), + svdup_n_f64(0f64), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_s8() -> svint8x4_t { + svcreate4_s8(svdup_n_s8(0), svdup_n_s8(0), svdup_n_s8(0), svdup_n_s8(0)) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_s16() -> svint16x4_t { + svcreate4_s16( + svdup_n_s16(0), + svdup_n_s16(0), + svdup_n_s16(0), + svdup_n_s16(0), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_s32() -> svint32x4_t { + svcreate4_s32( + svdup_n_s32(0), + svdup_n_s32(0), + 
svdup_n_s32(0), + svdup_n_s32(0), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_s64() -> svint64x4_t { + svcreate4_s64( + svdup_n_s64(0), + svdup_n_s64(0), + svdup_n_s64(0), + svdup_n_s64(0), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_u8() -> svuint8x4_t { + svcreate4_u8(svdup_n_u8(0), svdup_n_u8(0), svdup_n_u8(0), svdup_n_u8(0)) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_u16() -> svuint16x4_t { + svcreate4_u16( + svdup_n_u16(0), + svdup_n_u16(0), + svdup_n_u16(0), + svdup_n_u16(0), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like 
[`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_u32() -> svuint32x4_t { + svcreate4_u32( + svdup_n_u32(0), + svdup_n_u32(0), + svdup_n_u32(0), + svdup_n_u32(0), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_u64() -> svuint64x4_t { + svcreate4_u64( + svdup_n_u64(0), + svdup_n_u64(0), + svdup_n_u64(0), + svdup_n_u64(0), + ) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_f32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_f32() -> svfloat32_t { + svdup_n_f32(0f32) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_f64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_f64() -> svfloat64_t { + svdup_n_f64(0f64) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s8)"] +#[doc = "## Safety"] +#[doc = 
" * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_s8() -> svint8_t { + svdup_n_s8(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_s16() -> svint16_t { + svdup_n_s16(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_s32() -> svint32_t { + svdup_n_s32(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_s64() -> svint64_t { + svdup_n_s64(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_u8() -> svuint8_t { + svdup_n_u8(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_u16() -> svuint16_t { + svdup_n_u16(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_u32() -> svuint32_t { + svdup_n_u32(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_u64() -> svuint64_t { + svdup_n_u64(0) +} +#[doc = "Dot product (unsigned × signed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usdot, IMM_INDEX = 0))] +pub fn svusdot_lane_s32( + op1: svint32_t, + op2: svuint8_t, + op3: svint8_t, +) 
-> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.usdot.lane.nxv4i32" + )] + fn _svusdot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_index: i32, + ) -> svint32_t; + } + unsafe { _svusdot_lane_s32(op1, op2.as_signed(), op3, IMM_INDEX) } +} +#[doc = "Dot product (unsigned × signed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usdot))] +pub fn svusdot_s32(op1: svint32_t, op2: svuint8_t, op3: svint8_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usdot.nxv4i32")] + fn _svusdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svusdot_s32(op1, op2.as_signed(), op3) } +} +#[doc = "Dot product (unsigned × signed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usdot))] +pub fn svusdot_n_s32(op1: svint32_t, op2: svuint8_t, op3: i8) -> svint32_t { + svusdot_s32(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Matrix multiply-accumulate (unsigned × signed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusmmla[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usmmla))] +pub fn svusmmla_s32(op1: svint32_t, op2: svuint8_t, op3: svint8_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.usmmla.nxv4i32")] + fn _svusmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svusmmla_s32(op1, op2.as_signed(), op3) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv16i1")] + fn _svuzp1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svuzp1_b8(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv8i1")] + fn _svuzp1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svuzp1_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4i1")] + fn _svuzp1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { 
_svuzp1_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2i1")] + fn _svuzp1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svuzp1_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4f32")] + fn _svuzp1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp1_f32(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2f64")] + fn _svuzp1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp1_f64(op1, op2) } +} +#[doc = "Concatenate even elements from two 
inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv16i8")] + fn _svuzp1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuzp1_s8(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv8i16")] + fn _svuzp1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp1_s16(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4i32")] + fn _svuzp1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp1_s32(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s64])"] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2i64")] + fn _svuzp1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svuzp1_s64(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u64])"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv4f32")] + fn _svuzp1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp1q_f32(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv2f64")] + fn _svuzp1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp1q_f64(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv16i8")] + fn _svuzp1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuzp1q_s8(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv8i16")] + fn _svuzp1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp1q_s16(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv4i32")] + fn _svuzp1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp1q_s32(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv2i64")] + fn _svuzp1q_s64(op1: svint64_t, op2: 
svint64_t) -> svint64_t; + } + unsafe { _svuzp1q_s64(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = 
"Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv16i1")] + fn _svuzp2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svuzp2_b8(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv8i1")] + fn _svuzp2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svuzp2_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4i1")] + fn _svuzp2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svuzp2_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2i1")] + fn _svuzp2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svuzp2_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4f32")] + fn _svuzp2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp2_f32(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2f64")] + fn _svuzp2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp2_f64(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s8])"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv16i8")] + fn _svuzp2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuzp2_s8(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv8i16")] + fn _svuzp2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp2_s16(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4i32")] + fn _svuzp2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp2_s32(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn 
svuzp2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2i64")] + fn _svuzp2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svuzp2_s64(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(uzp2))] +pub fn svuzp2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv4f32")] + fn _svuzp2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp2q_f32(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv2f64")] + fn _svuzp2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp2q_f64(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv16i8")] + fn _svuzp2q_s8(op1: svint8_t, op2: svint8_t) -> 
svint8_t; + } + unsafe { _svuzp2q_s8(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv8i16")] + fn _svuzp2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp2q_s16(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv4i32")] + fn _svuzp2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp2q_s32(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv2i64")] + fn _svuzp2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svuzp2q_s64(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b8_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv16i1.i32" + )] + fn _svwhilele_b8_s32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilele_b8_s32(op1, op2) } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b16_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv8i1.i32" + )] + fn _svwhilele_b16_s32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilele_b16_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b32_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv4i1.i32" + )] + fn _svwhilele_b32_s32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilele_b32_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b64_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv2i1.i32" + )] + fn _svwhilele_b64_s32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilele_b64_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b8_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv16i1.i64" + )] + fn _svwhilele_b8_s64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilele_b8_s64(op1, op2) } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b16_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv8i1.i64" + )] + fn _svwhilele_b16_s64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilele_b16_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b32_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv4i1.i64" + )] + fn _svwhilele_b32_s64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilele_b32_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b64_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv2i1.i64" + )] + fn _svwhilele_b64_s64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilele_b64_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b8_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv16i1.i32" + )] + fn _svwhilele_b8_u32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilele_b8_u32(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b16_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv8i1.i32" + )] + fn _svwhilele_b16_u32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilele_b16_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b32_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv4i1.i32" + )] + fn _svwhilele_b32_u32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilele_b32_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b64_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv2i1.i32" + )] + fn _svwhilele_b64_u32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilele_b64_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than 
or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv16i1.i64" + )] + fn _svwhilele_b8_u64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilele_b8_u64(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv8i1.i64" + )] + fn _svwhilele_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilele_b16_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv4i1.i64" + )] + fn _svwhilele_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilele_b32_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing 
scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv2i1.i64" + )] + fn _svwhilele_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilele_b64_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b8_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv16i1.i32" + )] + fn _svwhilelt_b8_s32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilelt_b8_s32(op1, op2) } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b16_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv8i1.i32" + )] + fn _svwhilelt_b16_s32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilelt_b16_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b32_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv4i1.i32" + )] + fn _svwhilelt_b32_s32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilelt_b32_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b64_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv2i1.i32" + )] + fn _svwhilelt_b64_s32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilelt_b64_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b8_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv16i1.i64" + )] + fn _svwhilelt_b8_s64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilelt_b8_s64(op1, op2) } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b16_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv8i1.i64" + )] + fn _svwhilelt_b16_s64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilelt_b16_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b32_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv4i1.i64" + )] + fn _svwhilelt_b32_s64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilelt_b32_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b64_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv2i1.i64" + )] + fn _svwhilelt_b64_s64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilelt_b64_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b8_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv16i1.i32" + )] + fn _svwhilelt_b8_u32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilelt_b8_u32(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b16_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv8i1.i32" + )] + fn _svwhilelt_b16_u32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilelt_b16_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b32_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv4i1.i32" + )] + fn _svwhilelt_b32_u32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilelt_b32_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b64_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv2i1.i32" + )] + fn _svwhilelt_b64_u32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilelt_b64_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv16i1.i64" + )] + fn _svwhilelt_b8_u64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilelt_b8_u64(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv8i1.i64" + )] + fn _svwhilelt_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilelt_b16_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv4i1.i64" + )] + fn _svwhilelt_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilelt_b32_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv2i1.i64" + )] + fn _svwhilelt_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilelt_b64_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Write to the first-fault register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwrffr)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(wrffr))] +pub fn svwrffr(op: svbool_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.wrffr")] + fn _svwrffr(op: svbool_t); + } + unsafe { _svwrffr(op) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b8)"] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv16i1")] + fn _svzip1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svzip1_b8(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv8i1")] + fn _svzip1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svzip1_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4i1")] + fn _svzip1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svzip1_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(zip1))] +pub fn svzip1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2i1")] + fn _svzip1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svzip1_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4f32")] + fn _svzip1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svzip1_f32(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2f64")] + fn _svzip1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svzip1_f64(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + 
unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv16i8")] + fn _svzip1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svzip1_s8(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv8i16")] + fn _svzip1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svzip1_s16(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4i32")] + fn _svzip1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svzip1_s32(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2i64")] + fn _svzip1_s64(op1: 
svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svzip1_s64(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc 
= "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv4f32")] + fn _svzip1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svzip1q_f32(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv2f64")] + fn _svzip1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svzip1q_f64(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv16i8")] + fn _svzip1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svzip1q_s8(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv8i16")] + fn _svzip1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svzip1q_s16(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv4i32")] + fn _svzip1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svzip1q_s32(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv2i64")] + fn _svzip1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svzip1q_s64(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u8])"] 
+#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b8)"] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv16i1")] + fn _svzip2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svzip2_b8(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv8i1")] + fn _svzip2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svzip2_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4i1")] + fn _svzip2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svzip2_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(zip2))] +pub fn svzip2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2i1")] + fn _svzip2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svzip2_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4f32")] + fn _svzip2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svzip2_f32(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2f64")] + fn _svzip2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svzip2_f64(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t 
{ + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv16i8")] + fn _svzip2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svzip2_s8(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv8i16")] + fn _svzip2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svzip2_s16(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4i32")] + fn _svzip2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svzip2_s32(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2i64")] + fn _svzip2_s64(op1: 
svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svzip2_s64(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} 
+#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv4f32")] + fn _svzip2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svzip2q_f32(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv2f64")] + fn _svzip2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svzip2q_f64(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv16i8")] + fn _svzip2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svzip2q_s8(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv8i16")] + fn _svzip2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svzip2q_s16(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv4i32")] + fn _svzip2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svzip2q_s32(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv2i64")] + fn _svzip2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svzip2q_s64(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/ld_st_tests_aarch64.rs 
b/library/stdarch/crates/core_arch/src/aarch64/sve/ld_st_tests_aarch64.rs new file mode 100644 index 0000000000000..973b7e9fa35a0 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/ld_st_tests_aarch64.rs @@ -0,0 +1,9345 @@ +// This code is automatically generated. DO NOT MODIFY. +// +// Instead, modify `crates/stdarch-gen-arm/spec/sve` and run the following command to re-generate +// this file: +// +// ``` +// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec +// ``` +#![allow(unused)] +use super::*; +use std::boxed::Box; +use std::convert::{TryFrom, TryInto}; +use std::sync::LazyLock; +use std::vec::Vec; +use stdarch_test::simd_test; +static F32_DATA: LazyLock<[f32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as f32) + .collect::>() + .try_into() + .expect("f32 data incorrectly initialised") +}); +static F64_DATA: LazyLock<[f64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as f64) + .collect::>() + .try_into() + .expect("f64 data incorrectly initialised") +}); +static I8_DATA: LazyLock<[i8; 256 * 5]> = LazyLock::new(|| { + (0..256 * 5) + .map(|i| ((i + 128) % 256 - 128) as i8) + .collect::>() + .try_into() + .expect("i8 data incorrectly initialised") +}); +static I16_DATA: LazyLock<[i16; 128 * 5]> = LazyLock::new(|| { + (0..128 * 5) + .map(|i| i as i16) + .collect::>() + .try_into() + .expect("i16 data incorrectly initialised") +}); +static I32_DATA: LazyLock<[i32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as i32) + .collect::>() + .try_into() + .expect("i32 data incorrectly initialised") +}); +static I64_DATA: LazyLock<[i64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as i64) + .collect::>() + .try_into() + .expect("i64 data incorrectly initialised") +}); +static U8_DATA: LazyLock<[u8; 256 * 5]> = LazyLock::new(|| { + (0..256 * 5) + .map(|i| i as u8) + .collect::>() + .try_into() + .expect("u8 data incorrectly initialised") +}); +static U16_DATA: LazyLock<[u16; 128 * 5]> = 
LazyLock::new(|| { + (0..128 * 5) + .map(|i| i as u16) + .collect::>() + .try_into() + .expect("u16 data incorrectly initialised") +}); +static U32_DATA: LazyLock<[u32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as u32) + .collect::>() + .try_into() + .expect("u32 data incorrectly initialised") +}); +static U64_DATA: LazyLock<[u64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as u64) + .collect::>() + .try_into() + .expect("u64 data incorrectly initialised") +}); +#[target_feature(enable = "sve")] +fn assert_vector_matches_f32(vector: svfloat32_t, expected: svfloat32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_f32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_f64(vector: svfloat64_t, expected: svfloat64_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_f64(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i8(vector: svint8_t, expected: svint8_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b8(), defined)); + let cmp = svcmpne_s8(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i16(vector: svint16_t, expected: svint16_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b16(), defined)); + let cmp = svcmpne_s16(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i32(vector: svint32_t, expected: svint32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_s32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i64(vector: svint64_t, expected: svint64_t) { + let 
defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_s64(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u8(vector: svuint8_t, expected: svuint8_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b8(), defined)); + let cmp = svcmpne_u8(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u16(vector: svuint16_t, expected: svuint16_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b16(), defined)); + let cmp = svcmpne_u16(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u32(vector: svuint32_t, expected: svuint32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_u32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u64(vector: svuint64_t, expected: svuint64_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_u64(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_f32_with_svst1_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + svst1_f32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_f32(svptrue_b32(), storage.as_ptr() as *const f32); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_f64_with_svst1_f64() { + 
let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + svst1_f64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_f64(svptrue_b64(), storage.as_ptr() as *const f64); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_s8_with_svst1_s8() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_s8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1_s8(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i8( + loaded, + svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_s16_with_svst1_s16() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_s16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1_s16(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_s32_with_svst1_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_s32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + 
assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_s32(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_s64_with_svst1_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_s64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_s64(svptrue_b64(), storage.as_ptr() as *const i64); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_u8_with_svst1_u8() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_u8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1_u8(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u8( + loaded, + svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_u16_with_svst1_u16() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_u16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1_u16(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn 
test_svld1_u32_with_svst1_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_u32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_u32(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_u64_with_svst1_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_u64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_u64(svptrue_b64(), storage.as_ptr() as *const u64); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32index_f32_with_svst1_scatter_s32index_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_s32(0, 1); + svst1_scatter_s32index_f32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_s32index_f32(svptrue_b32(), storage.as_ptr() as *const f32, indices); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32index_s32_with_svst1_scatter_s32index_s32() { + let mut storage = [0 as i32; 320usize]; 
+ let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s32(0, 1); + svst1_scatter_s32index_s32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_s32index_s32(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32index_u32_with_svst1_scatter_s32index_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s32(0, 1); + svst1_scatter_s32index_u32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_gather_s32index_u32(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s64index_f64_with_svst1_scatter_s64index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_s64(0, 1); + svst1_scatter_s64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_s64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, indices); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = 
"sve")] +unsafe fn test_svld1_gather_s64index_s64_with_svst1_scatter_s64index_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1_scatter_s64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_s64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s64index_u64_with_svst1_scatter_s64index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1_scatter_s64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_s64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32index_f32_with_svst1_scatter_u32index_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_u32(0, 1); + svst1_scatter_u32index_f32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_u32index_f32(svptrue_b32(), storage.as_ptr() as *const f32, indices); + assert_vector_matches_f32( + loaded, + 
svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32index_s32_with_svst1_scatter_u32index_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1_scatter_u32index_s32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_u32index_s32(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32index_u32_with_svst1_scatter_u32index_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1_scatter_u32index_u32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_gather_u32index_u32(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64index_f64_with_svst1_scatter_u64index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_u64(0, 1); + svst1_scatter_u64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let 
loaded = svld1_gather_u64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, indices); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64index_s64_with_svst1_scatter_u64index_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1_scatter_u64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_u64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64index_u64_with_svst1_scatter_u64index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1_scatter_u64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_u64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32offset_f32_with_svst1_scatter_s32offset_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_s32(0, 4u32.try_into().unwrap()); + svst1_scatter_s32offset_f32(svptrue_b32(), 
storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_s32offset_f32(svptrue_b32(), storage.as_ptr() as *const f32, offsets); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32offset_s32_with_svst1_scatter_s32offset_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 4u32.try_into().unwrap()); + svst1_scatter_s32offset_s32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_s32offset_s32(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32offset_u32_with_svst1_scatter_s32offset_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 4u32.try_into().unwrap()); + svst1_scatter_s32offset_u32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_gather_s32offset_u32(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s64offset_f64_with_svst1_scatter_s64offset_f64() { + let mut storage = [0 as f64; 160usize]; + let data = 
svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svst1_scatter_s64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_s64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s64offset_s64_with_svst1_scatter_s64offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svst1_scatter_s64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_s64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s64offset_u64_with_svst1_scatter_s64offset_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svst1_scatter_s64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_s64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets); + assert_vector_matches_u64( + loaded, + 
svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32offset_f32_with_svst1_scatter_u32offset_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32offset_f32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_u32offset_f32(svptrue_b32(), storage.as_ptr() as *const f32, offsets); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32offset_s32_with_svst1_scatter_u32offset_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32offset_s32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_u32offset_s32(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32offset_u32_with_svst1_scatter_u32offset_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32offset_u32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in 
storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_gather_u32offset_u32(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64offset_f64_with_svst1_scatter_u64offset_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svst1_scatter_u64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_u64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64offset_s64_with_svst1_scatter_u64offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svst1_scatter_u64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_u64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64offset_u64_with_svst1_scatter_u64offset_u64() { + let mut storage = [0 as u64; 160usize]; + let data = 
svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svst1_scatter_u64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_u64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_f64_with_svst1_scatter_u64base_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_f64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_u64base_f64(svptrue_b64(), bases); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_s64_with_svst1_scatter_u64base_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_s64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = 
svld1_gather_u64base_s64(svptrue_b64(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_u64_with_svst1_scatter_u64base_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_u64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_u64base_u64(svptrue_b64(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32base_index_f32_with_svst1_scatter_u32base_index_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_index_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_u32base_index_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32base_index_s32_with_svst1_scatter_u32base_index_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 
1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_index_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_u32base_index_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32base_index_u32_with_svst1_scatter_u32base_index_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_index_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_gather_u32base_index_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_index_f64_with_svst1_scatter_u64base_index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + 
let loaded = svld1_gather_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_index_s64_with_svst1_scatter_u64base_index_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_index_u64_with_svst1_scatter_u64base_index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn 
test_svld1_gather_u32base_offset_f32_with_svst1_scatter_u32base_offset_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_offset_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_u32base_offset_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32base_offset_s32_with_svst1_scatter_u32base_offset_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_offset_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_u32base_offset_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32base_offset_u32_with_svst1_scatter_u32base_offset_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_offset_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + 
); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_gather_u32base_offset_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_offset_f64_with_svst1_scatter_u64base_offset_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_offset_s64_with_svst1_scatter_u64base_offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + 
assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_offset_u64_with_svst1_scatter_u64base_offset_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_f32_with_svst1_vnum_f32() { + let len = svcntw() as usize; + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); + svst1_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_f64_with_svst1_vnum_f64() { + let len = svcntd() as usize; + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); + 
svst1_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_s8_with_svst1_vnum_s8() { + let len = svcntb() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i8( + loaded, + svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_s16_with_svst1_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_s32_with_svst1_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + 
svst1_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_s64_with_svst1_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_u8_with_svst1_vnum_u8() { + let len = svcntb() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u8( + loaded, + svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_u16_with_svst1_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data); 
+ for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_u32_with_svst1_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_u64_with_svst1_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_f32() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_f32 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_f32(svptrue_b32(), F32_DATA.as_ptr()); + assert_vector_matches_f32( + loaded, + svtrn1q_f32( + svdupq_n_f32(0usize as f32, 1usize as f32, 2usize as f32, 3usize as 
f32), + svdupq_n_f32(4usize as f32, 5usize as f32, 6usize as f32, 7usize as f32), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_f64() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_f64 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_f64(svptrue_b64(), F64_DATA.as_ptr()); + assert_vector_matches_f64( + loaded, + svtrn1q_f64( + svdupq_n_f64(0usize as f64, 1usize as f64), + svdupq_n_f64(2usize as f64, 3usize as f64), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_s8() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_s8 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_s8(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i8( + loaded, + svtrn1q_s8( + svdupq_n_s8( + 0usize as i8, + 1usize as i8, + 2usize as i8, + 3usize as i8, + 4usize as i8, + 5usize as i8, + 6usize as i8, + 7usize as i8, + 8usize as i8, + 9usize as i8, + 10usize as i8, + 11usize as i8, + 12usize as i8, + 13usize as i8, + 14usize as i8, + 15usize as i8, + ), + svdupq_n_s8( + 16usize as i8, + 17usize as i8, + 18usize as i8, + 19usize as i8, + 20usize as i8, + 21usize as i8, + 22usize as i8, + 23usize as i8, + 24usize as i8, + 25usize as i8, + 26usize as i8, + 27usize as i8, + 28usize as i8, + 29usize as i8, + 30usize as i8, + 31usize as i8, + ), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_s16() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_s16 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_s16(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svtrn1q_s16( + svdupq_n_s16( + 0usize as i16, + 1usize as i16, + 2usize as i16, + 3usize as i16, + 4usize as i16, + 5usize as i16, + 6usize as i16, + 7usize as i16, + ), + svdupq_n_s16( + 8usize as i16, + 9usize as i16, + 10usize as i16, + 11usize as i16, + 12usize as i16, + 13usize as i16, + 14usize as i16, + 15usize as i16, + ), + 
), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_s32() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_s32 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_s32(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svtrn1q_s32( + svdupq_n_s32(0usize as i32, 1usize as i32, 2usize as i32, 3usize as i32), + svdupq_n_s32(4usize as i32, 5usize as i32, 6usize as i32, 7usize as i32), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_s64() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_s64 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_s64(svptrue_b64(), I64_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svtrn1q_s64( + svdupq_n_s64(0usize as i64, 1usize as i64), + svdupq_n_s64(2usize as i64, 3usize as i64), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_u8() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_u8 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_u8(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u8( + loaded, + svtrn1q_u8( + svdupq_n_u8( + 0usize as u8, + 1usize as u8, + 2usize as u8, + 3usize as u8, + 4usize as u8, + 5usize as u8, + 6usize as u8, + 7usize as u8, + 8usize as u8, + 9usize as u8, + 10usize as u8, + 11usize as u8, + 12usize as u8, + 13usize as u8, + 14usize as u8, + 15usize as u8, + ), + svdupq_n_u8( + 16usize as u8, + 17usize as u8, + 18usize as u8, + 19usize as u8, + 20usize as u8, + 21usize as u8, + 22usize as u8, + 23usize as u8, + 24usize as u8, + 25usize as u8, + 26usize as u8, + 27usize as u8, + 28usize as u8, + 29usize as u8, + 30usize as u8, + 31usize as u8, + ), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_u16() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_u16 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_u16(svptrue_b16(), 
U16_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svtrn1q_u16( + svdupq_n_u16( + 0usize as u16, + 1usize as u16, + 2usize as u16, + 3usize as u16, + 4usize as u16, + 5usize as u16, + 6usize as u16, + 7usize as u16, + ), + svdupq_n_u16( + 8usize as u16, + 9usize as u16, + 10usize as u16, + 11usize as u16, + 12usize as u16, + 13usize as u16, + 14usize as u16, + 15usize as u16, + ), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_u32() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_u32 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_u32(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svtrn1q_u32( + svdupq_n_u32(0usize as u32, 1usize as u32, 2usize as u32, 3usize as u32), + svdupq_n_u32(4usize as u32, 5usize as u32, 6usize as u32, 7usize as u32), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_u64() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_u64 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_u64(svptrue_b64(), U64_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svtrn1q_u64( + svdupq_n_u64(0usize as u64, 1usize as u64), + svdupq_n_u64(2usize as u64, 3usize as u64), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_f32() { + svsetffr(); + let loaded = svld1rq_f32(svptrue_b32(), F32_DATA.as_ptr()); + assert_vector_matches_f32( + loaded, + svdupq_n_f32(0usize as f32, 1usize as f32, 2usize as f32, 3usize as f32), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_f64() { + svsetffr(); + let loaded = svld1rq_f64(svptrue_b64(), F64_DATA.as_ptr()); + assert_vector_matches_f64(loaded, svdupq_n_f64(0usize as f64, 1usize as f64)); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_s8() { + svsetffr(); + let loaded = svld1rq_s8(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i8( + loaded, + svdupq_n_s8( + 0usize as i8, + 1usize as i8, + 2usize as i8, + 
3usize as i8, + 4usize as i8, + 5usize as i8, + 6usize as i8, + 7usize as i8, + 8usize as i8, + 9usize as i8, + 10usize as i8, + 11usize as i8, + 12usize as i8, + 13usize as i8, + 14usize as i8, + 15usize as i8, + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_s16() { + svsetffr(); + let loaded = svld1rq_s16(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svdupq_n_s16( + 0usize as i16, + 1usize as i16, + 2usize as i16, + 3usize as i16, + 4usize as i16, + 5usize as i16, + 6usize as i16, + 7usize as i16, + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_s32() { + svsetffr(); + let loaded = svld1rq_s32(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svdupq_n_s32(0usize as i32, 1usize as i32, 2usize as i32, 3usize as i32), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_s64() { + svsetffr(); + let loaded = svld1rq_s64(svptrue_b64(), I64_DATA.as_ptr()); + assert_vector_matches_i64(loaded, svdupq_n_s64(0usize as i64, 1usize as i64)); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_u8() { + svsetffr(); + let loaded = svld1rq_u8(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u8( + loaded, + svdupq_n_u8( + 0usize as u8, + 1usize as u8, + 2usize as u8, + 3usize as u8, + 4usize as u8, + 5usize as u8, + 6usize as u8, + 7usize as u8, + 8usize as u8, + 9usize as u8, + 10usize as u8, + 11usize as u8, + 12usize as u8, + 13usize as u8, + 14usize as u8, + 15usize as u8, + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_u16() { + svsetffr(); + let loaded = svld1rq_u16(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svdupq_n_u16( + 0usize as u16, + 1usize as u16, + 2usize as u16, + 3usize as u16, + 4usize as u16, + 5usize as u16, + 6usize as u16, + 7usize as u16, + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_u32() { + svsetffr(); + let loaded = svld1rq_u32(svptrue_b32(), U32_DATA.as_ptr()); + 
assert_vector_matches_u32( + loaded, + svdupq_n_u32(0usize as u32, 1usize as u32, 2usize as u32, 3usize as u32), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_u64() { + svsetffr(); + let loaded = svld1rq_u64(svptrue_b64(), U64_DATA.as_ptr()); + assert_vector_matches_u64(loaded, svdupq_n_u64(0usize as u64, 1usize as u64)); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_s32offset_s32_with_svst1b_scatter_s32offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 1u32.try_into().unwrap()); + svst1b_scatter_s32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_s32offset_s32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s32offset_s32_with_svst1h_scatter_s32offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 2u32.try_into().unwrap()); + svst1h_scatter_s32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_s32offset_s32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_s32offset_u32_with_svst1b_scatter_s32offset_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 
1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 1u32.try_into().unwrap()); + svst1b_scatter_s32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_gather_s32offset_u32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s32offset_u32_with_svst1h_scatter_s32offset_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 2u32.try_into().unwrap()); + svst1h_scatter_s32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_s32offset_u32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_s64offset_s64_with_svst1b_scatter_s64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svst1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn 
test_svld1sh_gather_s64offset_s64_with_svst1h_scatter_s64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svst1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_s64offset_s64_with_svst1w_scatter_s64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svst1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1sw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_s64offset_u64_with_svst1b_scatter_s64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svst1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + 
assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s64offset_u64_with_svst1h_scatter_s64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svst1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_s64offset_u64_with_svst1w_scatter_s64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svst1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1sw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u32offset_s32_with_svst1b_scatter_u32offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 
as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32offset_s32_with_svst1h_scatter_u32offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u32offset_u32_with_svst1b_scatter_u32offset_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32offset_u32_with_svst1h_scatter_u32offset_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + 
svst1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64offset_s64_with_svst1b_scatter_u64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svst1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64offset_s64_with_svst1h_scatter_u64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svst1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64offset_s64_with_svst1w_scatter_u64offset_s64() { + let mut storage = [0 as i32; 320usize]; 
+ let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svst1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1sw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64offset_u64_with_svst1b_scatter_u64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svst1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64offset_u64_with_svst1h_scatter_u64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svst1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} 
+#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64offset_u64_with_svst1w_scatter_u64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svst1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1sw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u32base_offset_s32_with_svst1b_scatter_u32base_offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32base_offset_s32_with_svst1h_scatter_u32base_offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i 
as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u32base_offset_u32_with_svst1b_scatter_u32base_offset_u32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32base_offset_u32_with_svst1h_scatter_u32base_offset_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64base_offset_s64_with_svst1b_scatter_u64base_offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = 
svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64base_offset_s64_with_svst1h_scatter_u64base_offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_offset_s64_with_svst1w_scatter_u64base_offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap(), 
data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64base_offset_u64_with_svst1b_scatter_u64base_offset_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64base_offset_u64_with_svst1h_scatter_u64base_offset_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 
1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_offset_u64_with_svst1w_scatter_u64base_offset_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64base_s64_with_svst1b_scatter_u64base_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_s64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u64base_s64(svptrue_b8(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64base_s64_with_svst1h_scatter_u64base_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = 
svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_s64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u64base_s64(svptrue_b16(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_s64_with_svst1w_scatter_u64base_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_s64(svptrue_b32(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_s64(svptrue_b32(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64base_u64_with_svst1b_scatter_u64base_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_u64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u64base_u64(svptrue_b8(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn 
test_svld1sh_gather_u64base_u64_with_svst1h_scatter_u64base_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_u64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u64base_u64(svptrue_b16(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_u64_with_svst1w_scatter_u64base_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_u64(svptrue_b32(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_u64(svptrue_b32(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_s16_with_svst1b_s16() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_s16(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_s16(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i16( + loaded, + 
svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_s32_with_svst1b_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_s32(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_s32(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_s32_with_svst1h_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_s32(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_s32(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_s64_with_svst1b_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_s64(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_s64(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_s64_with_svst1h_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + 
svst1h_s64(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_s64(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_s64_with_svst1w_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1w_s64(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_s64(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_u16_with_svst1b_u16() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_u16(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_u16(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_u32_with_svst1b_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_u32(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_u32(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_u32( + loaded, + 
svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_u32_with_svst1h_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_u32(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1sh_u32(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_u64_with_svst1b_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_u64(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_u64(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_u64_with_svst1h_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_u64(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1sh_u64(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_u64_with_svst1w_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + 
svst1w_u64(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1sw_u64(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_vnum_s16_with_svst1b_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s16(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_vnum_s16(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_vnum_s32_with_svst1b_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s32(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_vnum_s32(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_vnum_s32_with_svst1h_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_s32(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, 
&val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_vnum_s32(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_vnum_s64_with_svst1b_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s64(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_vnum_s64(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_vnum_s64_with_svst1h_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_s64(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_vnum_s64(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_vnum_s64_with_svst1w_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1w_vnum_s64(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() 
{ + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_vnum_s64(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_vnum_u16_with_svst1b_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u16(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_vnum_u16(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_vnum_u32_with_svst1b_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u32(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_vnum_u32(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_vnum_u32_with_svst1h_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_u32(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i 
as u16); + } + svsetffr(); + let loaded = svld1sh_vnum_u32(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_vnum_u64_with_svst1b_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u64(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_vnum_u64(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_vnum_u64_with_svst1h_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_u64(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1sh_vnum_u64(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_vnum_u64_with_svst1w_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1w_vnum_u64(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let 
loaded = svld1sw_vnum_u64(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s32index_s32_with_svst1h_scatter_s32index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s32(0, 1); + svst1h_scatter_s32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_s32index_s32(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s32index_u32_with_svst1h_scatter_s32index_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s32(0, 1); + svst1h_scatter_s32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_s32index_u32(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s64index_s64_with_svst1h_scatter_s64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + 
assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_s64index_s64_with_svst1w_scatter_s64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1sw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s64index_u64_with_svst1h_scatter_s64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_s64index_u64_with_svst1w_scatter_s64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1w_scatter_s64index_u64(svptrue_b32(), 
storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1sw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32index_s32_with_svst1h_scatter_u32index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1h_scatter_u32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_u32index_s32(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32index_u32_with_svst1h_scatter_u32index_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1h_scatter_u32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_u32index_u32(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64index_s64_with_svst1h_scatter_u64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let 
indices = svindex_u64(0, 1); + svst1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64index_s64_with_svst1w_scatter_u64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1sw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64index_u64_with_svst1h_scatter_u64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64index_u64_with_svst1w_scatter_u64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = 
svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1sw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32base_index_s32_with_svst1h_scatter_u32base_index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32base_index_u32_with_svst1h_scatter_u32base_index_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + 
assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64base_index_s64_with_svst1h_scatter_u64base_index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_index_s64_with_svst1w_scatter_u64base_index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64base_index_u64_with_svst1h_scatter_u64base_index_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 
1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_index_u64_with_svst1w_scatter_u64base_index_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_s32offset_s32_with_svst1b_scatter_s32offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 1u32.try_into().unwrap()); + svst1b_scatter_s32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_s32offset_s32(svptrue_b8(), 
storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s32offset_s32_with_svst1h_scatter_s32offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 2u32.try_into().unwrap()); + svst1h_scatter_s32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_s32offset_s32(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_s32offset_u32_with_svst1b_scatter_s32offset_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 1u32.try_into().unwrap()); + svst1b_scatter_s32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_gather_s32offset_u32(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s32offset_u32_with_svst1h_scatter_s32offset_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 2u32.try_into().unwrap()); + svst1h_scatter_s32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in 
storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_s32offset_u32(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_s64offset_s64_with_svst1b_scatter_s64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svst1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s64offset_s64_with_svst1h_scatter_s64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svst1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_s64offset_s64_with_svst1w_scatter_s64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = 
svindex_s64(0, 4u32.try_into().unwrap()); + svst1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1uw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_s64offset_u64_with_svst1b_scatter_s64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svst1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s64offset_u64_with_svst1h_scatter_s64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svst1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_s64offset_u64_with_svst1w_scatter_s64offset_u64() 
{ + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svst1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1uw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u32offset_s32_with_svst1b_scatter_u32offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32offset_s32_with_svst1h_scatter_u32offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 
1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u32offset_u32_with_svst1b_scatter_u32offset_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32offset_u32_with_svst1h_scatter_u32offset_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64offset_s64_with_svst1b_scatter_u64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svst1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = 
svld1ub_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64offset_s64_with_svst1h_scatter_u64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svst1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64offset_s64_with_svst1w_scatter_u64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svst1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1uw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64offset_u64_with_svst1b_scatter_u64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svst1b_scatter_u64offset_u64(svptrue_b8(), 
storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64offset_u64_with_svst1h_scatter_u64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svst1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64offset_u64_with_svst1w_scatter_u64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svst1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1uw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u32base_offset_s32_with_svst1b_scatter_u32base_offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = 
svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32base_offset_s32_with_svst1h_scatter_u32base_offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u32base_offset_u32_with_svst1b_scatter_u32base_offset_u32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() 
as i64 + 1u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32base_offset_u32_with_svst1h_scatter_u32base_offset_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64base_offset_s64_with_svst1b_scatter_u64base_offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_offset_s64_with_svst1h_scatter_u64base_offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 
1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_offset_s64_with_svst1w_scatter_u64base_offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64base_offset_u64_with_svst1b_scatter_u64base_offset_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in 
storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_offset_u64_with_svst1h_scatter_u64base_offset_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_offset_u64_with_svst1w_scatter_u64base_offset_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} 
+#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64base_s64_with_svst1b_scatter_u64base_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_s64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u64base_s64(svptrue_b8(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_s64_with_svst1h_scatter_u64base_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_s64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_s64(svptrue_b16(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_s64_with_svst1w_scatter_u64base_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_s64(svptrue_b32(), bases, data); + for (i, &val) in 
storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_s64(svptrue_b32(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64base_u64_with_svst1b_scatter_u64base_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_u64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u64base_u64(svptrue_b8(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_u64_with_svst1h_scatter_u64base_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_u64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_u64(svptrue_b16(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_u64_with_svst1w_scatter_u64base_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 
1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_u64(svptrue_b32(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_u64(svptrue_b32(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_s16_with_svst1b_s16() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_s16(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_s16(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_s32_with_svst1b_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_s32(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_s32(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_s32_with_svst1h_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_s32(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + 
assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_s32(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_s64_with_svst1b_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_s64(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_s64(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_s64_with_svst1h_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_s64(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_s64(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_s64_with_svst1w_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1w_s64(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_s64(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe 
fn test_svld1ub_u16_with_svst1b_u16() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_u16(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_u16(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_u32_with_svst1b_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_u32(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_u32(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_u32_with_svst1h_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_u32(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1uh_u32(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_u64_with_svst1b_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_u64(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val 
== i as u8); + } + svsetffr(); + let loaded = svld1ub_u64(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_u64_with_svst1h_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_u64(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1uh_u64(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_u64_with_svst1w_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1w_u64(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1uw_u64(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_vnum_s16_with_svst1b_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s16(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_vnum_s16(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), 
+ ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_vnum_s32_with_svst1b_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s32(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_vnum_s32(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_vnum_s32_with_svst1h_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_s32(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_vnum_s32(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_vnum_s64_with_svst1b_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s64(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_vnum_s64(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = 
"sve")] +unsafe fn test_svld1uh_vnum_s64_with_svst1h_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_s64(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_vnum_s64(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_vnum_s64_with_svst1w_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1w_vnum_s64(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_vnum_s64(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_vnum_u16_with_svst1b_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u16(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_vnum_u16(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn 
test_svld1ub_vnum_u32_with_svst1b_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u32(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_vnum_u32(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_vnum_u32_with_svst1h_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_u32(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1uh_vnum_u32(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_vnum_u64_with_svst1b_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u64(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_vnum_u64(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn 
test_svld1uh_vnum_u64_with_svst1h_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_u64(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1uh_vnum_u64(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_vnum_u64_with_svst1w_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1w_vnum_u64(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1uw_vnum_u64(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s32index_s32_with_svst1h_scatter_s32index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s32(0, 1); + svst1h_scatter_s32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_s32index_s32(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe 
fn test_svld1uh_gather_s32index_u32_with_svst1h_scatter_s32index_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s32(0, 1); + svst1h_scatter_s32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_s32index_u32(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s64index_s64_with_svst1h_scatter_s64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_s64index_s64_with_svst1w_scatter_s64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1uw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 
1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s64index_u64_with_svst1h_scatter_s64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_s64index_u64_with_svst1w_scatter_s64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1uw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32index_s32_with_svst1h_scatter_u32index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1h_scatter_u32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_u32index_s32(svptrue_b16(), storage.as_ptr() as *const u16, indices); + 
assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32index_u32_with_svst1h_scatter_u32index_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1h_scatter_u32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_u32index_u32(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64index_s64_with_svst1h_scatter_u64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64index_s64_with_svst1w_scatter_u64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + 
svld1uw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64index_u64_with_svst1h_scatter_u64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64index_u64_with_svst1w_scatter_u64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1uw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32base_index_s32_with_svst1h_scatter_u32base_index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + 
data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32base_index_u32_with_svst1h_scatter_u32base_index_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_index_s64_with_svst1h_scatter_u64base_index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} 
+#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_index_s64_with_svst1w_scatter_u64base_index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_index_u64_with_svst1h_scatter_u64base_index_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_index_u64_with_svst1w_scatter_u64base_index_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 
4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_f32_with_svst2_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcreate2_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); + svst2_f32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld2_f32(svptrue_b32(), storage.as_ptr() as *const f32); + assert_vector_matches_f32( + svget2_f32::<{ 0usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + svget2_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_f64_with_svst2_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcreate2_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); + svst2_f64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val 
== 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld2_f64(svptrue_b64(), storage.as_ptr() as *const f64); + assert_vector_matches_f64( + svget2_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + svget2_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_s8_with_svst2_s8() { + let mut storage = [0 as i8; 1280usize]; + let data = svcreate2_s8( + svindex_s8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_s8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_s8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld2_s8(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i8( + svget2_s8::<{ 0usize as i32 }>(loaded), + svindex_s8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget2_s8::<{ 1usize as i32 }>(loaded), + svindex_s8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_s16_with_svst2_s16() { + let mut storage = [0 as i16; 640usize]; + let data = svcreate2_s16( + svindex_s16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_s16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_s16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld2_s16(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i16( + svget2_s16::<{ 0usize as i32 }>(loaded), + svindex_s16((0usize).try_into().unwrap(), 
2usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget2_s16::<{ 1usize as i32 }>(loaded), + svindex_s16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_s32_with_svst2_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svcreate2_s32( + svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_s32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld2_s32(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i32( + svget2_s32::<{ 0usize as i32 }>(loaded), + svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget2_s32::<{ 1usize as i32 }>(loaded), + svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_s64_with_svst2_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svcreate2_s64( + svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_s64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld2_s64(svptrue_b64(), storage.as_ptr() as *const i64); + assert_vector_matches_i64( + svget2_s64::<{ 0usize as i32 }>(loaded), + svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget2_s64::<{ 1usize as i32 }>(loaded), + svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_u8_with_svst2_u8() { + let mut storage = [0 as u8; 1280usize]; + let data = 
svcreate2_u8( + svindex_u8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_u8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_u8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld2_u8(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u8( + svget2_u8::<{ 0usize as i32 }>(loaded), + svindex_u8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_u8( + svget2_u8::<{ 1usize as i32 }>(loaded), + svindex_u8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_u16_with_svst2_u16() { + let mut storage = [0 as u16; 640usize]; + let data = svcreate2_u16( + svindex_u16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_u16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_u16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld2_u16(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u16( + svget2_u16::<{ 0usize as i32 }>(loaded), + svindex_u16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_u16( + svget2_u16::<{ 1usize as i32 }>(loaded), + svindex_u16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_u32_with_svst2_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svcreate2_u32( + svindex_u32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_u32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_u32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + 
svsetffr(); + let loaded = svld2_u32(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_u32( + svget2_u32::<{ 0usize as i32 }>(loaded), + svindex_u32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget2_u32::<{ 1usize as i32 }>(loaded), + svindex_u32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_u64_with_svst2_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svcreate2_u64( + svindex_u64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_u64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_u64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld2_u64(svptrue_b64(), storage.as_ptr() as *const u64); + assert_vector_matches_u64( + svget2_u64::<{ 0usize as i32 }>(loaded), + svindex_u64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_u64( + svget2_u64::<{ 1usize as i32 }>(loaded), + svindex_u64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_f32_with_svst2_vnum_f32() { + let len = svcntw() as usize; + let mut storage = [0 as f32; 320usize]; + let data = svcreate2_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); + svst2_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld2_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1); + assert_vector_matches_f32( + svget2_f32::<{ 0usize as i32 
}>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget2_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_f64_with_svst2_vnum_f64() { + let len = svcntd() as usize; + let mut storage = [0 as f64; 160usize]; + let data = svcreate2_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); + svst2_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld2_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1); + assert_vector_matches_f64( + svget2_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget2_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_s8_with_svst2_vnum_s8() { + let len = svcntb() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svcreate2_s8( + svindex_s8( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + 
svsetffr(); + let loaded = svld2_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i8( + svget2_s8::<{ 0usize as i32 }>(loaded), + svindex_s8( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + svget2_s8::<{ 1usize as i32 }>(loaded), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_s16_with_svst2_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svcreate2_s16( + svindex_s16( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld2_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i16( + svget2_s16::<{ 0usize as i32 }>(loaded), + svindex_s16( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget2_s16::<{ 1usize as i32 }>(loaded), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_s32_with_svst2_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svcreate2_s32( + svindex_s32( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld2_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1); 
+ assert_vector_matches_i32( + svget2_s32::<{ 0usize as i32 }>(loaded), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i32( + svget2_s32::<{ 1usize as i32 }>(loaded), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_s64_with_svst2_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i64; 160usize]; + let data = svcreate2_s64( + svindex_s64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld2_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1); + assert_vector_matches_i64( + svget2_s64::<{ 0usize as i32 }>(loaded), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget2_s64::<{ 1usize as i32 }>(loaded), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_u8_with_svst2_vnum_u8() { + let len = svcntb() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svcreate2_u8( + svindex_u8( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld2_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u8( + svget2_u8::<{ 0usize as i32 }>(loaded), + svindex_u8( + (len + 
0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u8( + svget2_u8::<{ 1usize as i32 }>(loaded), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_u16_with_svst2_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svcreate2_u16( + svindex_u16( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld2_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u16( + svget2_u16::<{ 0usize as i32 }>(loaded), + svindex_u16( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget2_u16::<{ 1usize as i32 }>(loaded), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_u32_with_svst2_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svcreate2_u32( + svindex_u32( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_u32( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld2_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u32( + svget2_u32::<{ 0usize as i32 }>(loaded), + svindex_u32( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + 
assert_vector_matches_u32( + svget2_u32::<{ 1usize as i32 }>(loaded), + svindex_u32( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_u64_with_svst2_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u64; 160usize]; + let data = svcreate2_u64( + svindex_u64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_u64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld2_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1); + assert_vector_matches_u64( + svget2_u64::<{ 0usize as i32 }>(loaded), + svindex_u64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget2_u64::<{ 1usize as i32 }>(loaded), + svindex_u64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_f32_with_svst3_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcreate3_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + svst3_f32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld3_f32(svptrue_b32(), storage.as_ptr() as *const f32); + assert_vector_matches_f32( + svget3_f32::<{ 0usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + 
svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + svget3_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + svget3_f32::<{ 2usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_f64_with_svst3_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcreate3_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + svst3_f64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld3_f64(svptrue_b64(), storage.as_ptr() as *const f64); + assert_vector_matches_f64( + svget3_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + svget3_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + svget3_f64::<{ 2usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_s8_with_svst3_s8() { + let mut storage = [0 as i8; 1280usize]; + let data = svcreate3_s8( + svindex_s8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + 
svindex_s8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_s8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld3_s8(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i8( + svget3_s8::<{ 0usize as i32 }>(loaded), + svindex_s8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget3_s8::<{ 1usize as i32 }>(loaded), + svindex_s8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget3_s8::<{ 2usize as i32 }>(loaded), + svindex_s8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_s16_with_svst3_s16() { + let mut storage = [0 as i16; 640usize]; + let data = svcreate3_s16( + svindex_s16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_s16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld3_s16(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i16( + svget3_s16::<{ 0usize as i32 }>(loaded), + svindex_s16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget3_s16::<{ 1usize as i32 }>(loaded), + svindex_s16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget3_s16::<{ 2usize as i32 }>(loaded), + svindex_s16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_s32_with_svst3_s32() { + let mut storage = [0 as i32; 
320usize]; + let data = svcreate3_s32( + svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_s32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld3_s32(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i32( + svget3_s32::<{ 0usize as i32 }>(loaded), + svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget3_s32::<{ 1usize as i32 }>(loaded), + svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget3_s32::<{ 2usize as i32 }>(loaded), + svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_s64_with_svst3_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svcreate3_s64( + svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_s64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld3_s64(svptrue_b64(), storage.as_ptr() as *const i64); + assert_vector_matches_i64( + svget3_s64::<{ 0usize as i32 }>(loaded), + svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget3_s64::<{ 1usize as i32 }>(loaded), + svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget3_s64::<{ 2usize as i32 }>(loaded), + svindex_s64((2usize).try_into().unwrap(), 
3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_u8_with_svst3_u8() { + let mut storage = [0 as u8; 1280usize]; + let data = svcreate3_u8( + svindex_u8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_u8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld3_u8(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u8( + svget3_u8::<{ 0usize as i32 }>(loaded), + svindex_u8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u8( + svget3_u8::<{ 1usize as i32 }>(loaded), + svindex_u8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u8( + svget3_u8::<{ 2usize as i32 }>(loaded), + svindex_u8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_u16_with_svst3_u16() { + let mut storage = [0 as u16; 640usize]; + let data = svcreate3_u16( + svindex_u16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_u16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld3_u16(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u16( + svget3_u16::<{ 0usize as i32 }>(loaded), + svindex_u16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u16( + svget3_u16::<{ 1usize as i32 }>(loaded), + svindex_u16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + 
assert_vector_matches_u16( + svget3_u16::<{ 2usize as i32 }>(loaded), + svindex_u16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_u32_with_svst3_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svcreate3_u32( + svindex_u32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_u32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld3_u32(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_u32( + svget3_u32::<{ 0usize as i32 }>(loaded), + svindex_u32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget3_u32::<{ 1usize as i32 }>(loaded), + svindex_u32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget3_u32::<{ 2usize as i32 }>(loaded), + svindex_u32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_u64_with_svst3_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svcreate3_u64( + svindex_u64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_u64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld3_u64(svptrue_b64(), storage.as_ptr() as *const u64); + assert_vector_matches_u64( + svget3_u64::<{ 0usize as i32 }>(loaded), + svindex_u64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + 
assert_vector_matches_u64( + svget3_u64::<{ 1usize as i32 }>(loaded), + svindex_u64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u64( + svget3_u64::<{ 2usize as i32 }>(loaded), + svindex_u64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_f32_with_svst3_vnum_f32() { + let len = svcntw() as usize; + let mut storage = [0 as f32; 320usize]; + let data = svcreate3_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + svst3_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld3_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1); + assert_vector_matches_f32( + svget3_f32::<{ 0usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget3_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget3_f32::<{ 2usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_f64_with_svst3_vnum_f64() { + let len = svcntd() as usize; + let mut storage = [0 as f64; 160usize]; + let data = svcreate3_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + 
(len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + svst3_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld3_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1); + assert_vector_matches_f64( + svget3_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget3_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget3_f64::<{ 2usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_s8_with_svst3_vnum_s8() { + let len = svcntb() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svcreate3_s8( + svindex_s8( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s8( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld3_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i8( + svget3_s8::<{ 0usize as i32 }>(loaded), + 
svindex_s8( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + svget3_s8::<{ 1usize as i32 }>(loaded), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + svget3_s8::<{ 2usize as i32 }>(loaded), + svindex_s8( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_s16_with_svst3_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svcreate3_s16( + svindex_s16( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s16( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld3_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i16( + svget3_s16::<{ 0usize as i32 }>(loaded), + svindex_s16( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget3_s16::<{ 1usize as i32 }>(loaded), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget3_s16::<{ 2usize as i32 }>(loaded), + svindex_s16( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_s32_with_svst3_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svcreate3_s32( + svindex_s32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + 
svindex_s32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld3_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_i32( + svget3_s32::<{ 0usize as i32 }>(loaded), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i32( + svget3_s32::<{ 1usize as i32 }>(loaded), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i32( + svget3_s32::<{ 2usize as i32 }>(loaded), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_s64_with_svst3_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i64; 160usize]; + let data = svcreate3_s64( + svindex_s64( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld3_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1); + assert_vector_matches_i64( + svget3_s64::<{ 0usize as i32 }>(loaded), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget3_s64::<{ 1usize as i32 }>(loaded), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget3_s64::<{ 2usize as i32 }>(loaded), + svindex_s64( + (len + 
2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_u8_with_svst3_vnum_u8() { + let len = svcntb() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svcreate3_u8( + svindex_u8( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u8( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld3_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u8( + svget3_u8::<{ 0usize as i32 }>(loaded), + svindex_u8( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u8( + svget3_u8::<{ 1usize as i32 }>(loaded), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u8( + svget3_u8::<{ 2usize as i32 }>(loaded), + svindex_u8( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_u16_with_svst3_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svcreate3_u16( + svindex_u16( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u16( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld3_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1); + 
assert_vector_matches_u16( + svget3_u16::<{ 0usize as i32 }>(loaded), + svindex_u16( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget3_u16::<{ 1usize as i32 }>(loaded), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget3_u16::<{ 2usize as i32 }>(loaded), + svindex_u16( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_u32_with_svst3_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svcreate3_u32( + svindex_u32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld3_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u32( + svget3_u32::<{ 0usize as i32 }>(loaded), + svindex_u32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u32( + svget3_u32::<{ 1usize as i32 }>(loaded), + svindex_u32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u32( + svget3_u32::<{ 2usize as i32 }>(loaded), + svindex_u32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_u64_with_svst3_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u64; 160usize]; + let data = svcreate3_u64( + svindex_u64( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u64( + 
(len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u64( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld3_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1); + assert_vector_matches_u64( + svget3_u64::<{ 0usize as i32 }>(loaded), + svindex_u64( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget3_u64::<{ 1usize as i32 }>(loaded), + svindex_u64( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget3_u64::<{ 2usize as i32 }>(loaded), + svindex_u64( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_f32_with_svst4_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcreate4_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + svst4_f32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld4_f32(svptrue_b32(), storage.as_ptr() as *const f32); + assert_vector_matches_f32( + svget4_f32::<{ 0usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + 
svget4_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + svget4_f32::<{ 2usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + svget4_f32::<{ 3usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_f64_with_svst4_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcreate4_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + svst4_f64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld4_f64(svptrue_b64(), storage.as_ptr() as *const f64); + assert_vector_matches_f64( + svget4_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + svget4_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + svget4_f64::<{ 2usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + 
svget4_f64::<{ 3usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_s8_with_svst4_s8() { + let mut storage = [0 as i8; 1280usize]; + let data = svcreate4_s8( + svindex_s8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_s8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld4_s8(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i8( + svget4_s8::<{ 0usize as i32 }>(loaded), + svindex_s8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget4_s8::<{ 1usize as i32 }>(loaded), + svindex_s8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget4_s8::<{ 2usize as i32 }>(loaded), + svindex_s8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget4_s8::<{ 3usize as i32 }>(loaded), + svindex_s8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_s16_with_svst4_s16() { + let mut storage = [0 as i16; 640usize]; + let data = svcreate4_s16( + svindex_s16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_s16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || 
val == i as i16); + } + svsetffr(); + let loaded = svld4_s16(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i16( + svget4_s16::<{ 0usize as i32 }>(loaded), + svindex_s16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget4_s16::<{ 1usize as i32 }>(loaded), + svindex_s16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget4_s16::<{ 2usize as i32 }>(loaded), + svindex_s16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget4_s16::<{ 3usize as i32 }>(loaded), + svindex_s16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_s32_with_svst4_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svcreate4_s32( + svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_s32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld4_s32(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i32( + svget4_s32::<{ 0usize as i32 }>(loaded), + svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget4_s32::<{ 1usize as i32 }>(loaded), + svindex_s32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget4_s32::<{ 2usize as i32 }>(loaded), + svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget4_s32::<{ 3usize as i32 }>(loaded), + svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} 
+#[simd_test(enable = "sve")] +unsafe fn test_svld4_s64_with_svst4_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svcreate4_s64( + svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_s64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld4_s64(svptrue_b64(), storage.as_ptr() as *const i64); + assert_vector_matches_i64( + svget4_s64::<{ 0usize as i32 }>(loaded), + svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget4_s64::<{ 1usize as i32 }>(loaded), + svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget4_s64::<{ 2usize as i32 }>(loaded), + svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget4_s64::<{ 3usize as i32 }>(loaded), + svindex_s64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_u8_with_svst4_u8() { + let mut storage = [0 as u8; 1280usize]; + let data = svcreate4_u8( + svindex_u8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_u8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld4_u8(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u8( + 
svget4_u8::<{ 0usize as i32 }>(loaded), + svindex_u8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u8( + svget4_u8::<{ 1usize as i32 }>(loaded), + svindex_u8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u8( + svget4_u8::<{ 2usize as i32 }>(loaded), + svindex_u8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u8( + svget4_u8::<{ 3usize as i32 }>(loaded), + svindex_u8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_u16_with_svst4_u16() { + let mut storage = [0 as u16; 640usize]; + let data = svcreate4_u16( + svindex_u16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_u16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld4_u16(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u16( + svget4_u16::<{ 0usize as i32 }>(loaded), + svindex_u16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u16( + svget4_u16::<{ 1usize as i32 }>(loaded), + svindex_u16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u16( + svget4_u16::<{ 2usize as i32 }>(loaded), + svindex_u16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u16( + svget4_u16::<{ 3usize as i32 }>(loaded), + svindex_u16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_u32_with_svst4_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svcreate4_u32( + 
svindex_u32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_u32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld4_u32(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_u32( + svget4_u32::<{ 0usize as i32 }>(loaded), + svindex_u32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget4_u32::<{ 1usize as i32 }>(loaded), + svindex_u32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget4_u32::<{ 2usize as i32 }>(loaded), + svindex_u32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget4_u32::<{ 3usize as i32 }>(loaded), + svindex_u32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_u64_with_svst4_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svcreate4_u64( + svindex_u64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_u64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld4_u64(svptrue_b64(), storage.as_ptr() as *const u64); + assert_vector_matches_u64( + svget4_u64::<{ 0usize as i32 }>(loaded), + svindex_u64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + 
assert_vector_matches_u64( + svget4_u64::<{ 1usize as i32 }>(loaded), + svindex_u64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u64( + svget4_u64::<{ 2usize as i32 }>(loaded), + svindex_u64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u64( + svget4_u64::<{ 3usize as i32 }>(loaded), + svindex_u64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_f32_with_svst4_vnum_f32() { + let len = svcntw() as usize; + let mut storage = [0 as f32; 320usize]; + let data = svcreate4_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + svst4_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld4_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1); + assert_vector_matches_f32( + svget4_f32::<{ 0usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget4_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget4_f32::<{ 2usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 
4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget4_f32::<{ 3usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_f64_with_svst4_vnum_f64() { + let len = svcntd() as usize; + let mut storage = [0 as f64; 160usize]; + let data = svcreate4_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + svst4_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld4_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1); + assert_vector_matches_f64( + svget4_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget4_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget4_f64::<{ 2usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget4_f64::<{ 3usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 3usize).try_into().unwrap(), + 
4usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_s8_with_svst4_vnum_s8() { + let len = svcntb() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svcreate4_s8( + svindex_s8( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s8( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s8( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld4_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i8( + svget4_s8::<{ 0usize as i32 }>(loaded), + svindex_s8( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + svget4_s8::<{ 1usize as i32 }>(loaded), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + svget4_s8::<{ 2usize as i32 }>(loaded), + svindex_s8( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + svget4_s8::<{ 3usize as i32 }>(loaded), + svindex_s8( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_s16_with_svst4_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svcreate4_s16( + svindex_s16( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s16( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s16( + (len + 3usize).try_into().unwrap(), + 
4usize.try_into().unwrap(), + ), + ); + svst4_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld4_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i16( + svget4_s16::<{ 0usize as i32 }>(loaded), + svindex_s16( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget4_s16::<{ 1usize as i32 }>(loaded), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget4_s16::<{ 2usize as i32 }>(loaded), + svindex_s16( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget4_s16::<{ 3usize as i32 }>(loaded), + svindex_s16( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_s32_with_svst4_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svcreate4_s32( + svindex_s32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld4_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_i32( + svget4_s32::<{ 0usize as i32 }>(loaded), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i32( + svget4_s32::<{ 1usize as i32 }>(loaded), + 
svindex_s32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i32( + svget4_s32::<{ 2usize as i32 }>(loaded), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i32( + svget4_s32::<{ 3usize as i32 }>(loaded), + svindex_s32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_s64_with_svst4_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i64; 160usize]; + let data = svcreate4_s64( + svindex_s64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s64( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld4_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1); + assert_vector_matches_i64( + svget4_s64::<{ 0usize as i32 }>(loaded), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget4_s64::<{ 1usize as i32 }>(loaded), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget4_s64::<{ 2usize as i32 }>(loaded), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget4_s64::<{ 3usize as i32 }>(loaded), + svindex_s64( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_u8_with_svst4_vnum_u8() { + let len = svcntb() as 
usize; + let mut storage = [0 as u8; 1280usize]; + let data = svcreate4_u8( + svindex_u8( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u8( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u8( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld4_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u8( + svget4_u8::<{ 0usize as i32 }>(loaded), + svindex_u8( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u8( + svget4_u8::<{ 1usize as i32 }>(loaded), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u8( + svget4_u8::<{ 2usize as i32 }>(loaded), + svindex_u8( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u8( + svget4_u8::<{ 3usize as i32 }>(loaded), + svindex_u8( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_u16_with_svst4_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svcreate4_u16( + svindex_u16( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u16( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u16( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val 
== 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld4_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u16( + svget4_u16::<{ 0usize as i32 }>(loaded), + svindex_u16( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget4_u16::<{ 1usize as i32 }>(loaded), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget4_u16::<{ 2usize as i32 }>(loaded), + svindex_u16( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget4_u16::<{ 3usize as i32 }>(loaded), + svindex_u16( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_u32_with_svst4_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svcreate4_u32( + svindex_u32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u32( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld4_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u32( + svget4_u32::<{ 0usize as i32 }>(loaded), + svindex_u32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u32( + svget4_u32::<{ 1usize as i32 }>(loaded), + svindex_u32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u32( + svget4_u32::<{ 2usize as i32 }>(loaded), + 
svindex_u32( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u32( + svget4_u32::<{ 3usize as i32 }>(loaded), + svindex_u32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_u64_with_svst4_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u64; 160usize]; + let data = svcreate4_u64( + svindex_u64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u64( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld4_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1); + assert_vector_matches_u64( + svget4_u64::<{ 0usize as i32 }>(loaded), + svindex_u64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget4_u64::<{ 1usize as i32 }>(loaded), + svindex_u64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget4_u64::<{ 2usize as i32 }>(loaded), + svindex_u64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget4_u64::<{ 3usize as i32 }>(loaded), + svindex_u64( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_f32() { + svsetffr(); + let _ = svld1_f32(svptrue_b32(), F32_DATA.as_ptr()); + let loaded = svldff1_f32(svptrue_b32(), F32_DATA.as_ptr()); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + 
svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_f64() { + svsetffr(); + let _ = svld1_f64(svptrue_b64(), F64_DATA.as_ptr()); + let loaded = svldff1_f64(svptrue_b64(), F64_DATA.as_ptr()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_s8() { + svsetffr(); + let _ = svld1_s8(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1_s8(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i8( + loaded, + svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_s16() { + svsetffr(); + let _ = svld1_s16(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1_s16(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_s32() { + svsetffr(); + let _ = svld1_s32(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldff1_s32(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_s64() { + svsetffr(); + let _ = svld1_s64(svptrue_b64(), I64_DATA.as_ptr()); + let loaded = svldff1_s64(svptrue_b64(), I64_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_u8() { + svsetffr(); + let _ = svld1_u8(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1_u8(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u8( + loaded, + svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} 
+#[simd_test(enable = "sve")] +unsafe fn test_svldff1_u16() { + svsetffr(); + let _ = svld1_u16(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldff1_u16(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_u32() { + svsetffr(); + let _ = svld1_u32(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldff1_u32(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_u64() { + svsetffr(); + let _ = svld1_u64(svptrue_b64(), U64_DATA.as_ptr()); + let loaded = svldff1_u64(svptrue_b64(), U64_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_f32() { + svsetffr(); + let _ = svld1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_f64() { + svsetffr(); + let _ = svld1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_s8() { + svsetffr(); + let _ = svld1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = 
svcntb() as usize; + assert_vector_matches_i8( + loaded, + svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_s16() { + svsetffr(); + let _ = svld1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_s32() { + svsetffr(); + let _ = svld1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_s64() { + svsetffr(); + let _ = svld1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_u8() { + svsetffr(); + let _ = svld1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntb() as usize; + assert_vector_matches_u8( + loaded, + svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_u16() { + svsetffr(); + let _ = svld1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), 
+ 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_u32() { + svsetffr(); + let _ = svld1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_u64() { + svsetffr(); + let _ = svld1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_s16() { + svsetffr(); + let _ = svld1sb_s16(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_s16(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_s32() { + svsetffr(); + let _ = svld1sb_s32(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_s32(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_s32() { + svsetffr(); + let _ = svld1sh_s32(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1sh_s32(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_s64() { + svsetffr(); + let _ = svld1sb_s64(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_s64(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + 
svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_s64() { + svsetffr(); + let _ = svld1sh_s64(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1sh_s64(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_s64() { + svsetffr(); + let _ = svld1sw_s64(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldff1sw_s64(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_u16() { + svsetffr(); + let _ = svld1sb_u16(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_u16(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_u32() { + svsetffr(); + let _ = svld1sb_u32(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_u32(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_u32() { + svsetffr(); + let _ = svld1sh_u32(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1sh_u32(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_u64() { + svsetffr(); + let _ = svld1sb_u64(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_u64(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} 
+#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_u64() { + svsetffr(); + let _ = svld1sh_u64(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1sh_u64(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_u64() { + svsetffr(); + let _ = svld1sw_u64(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldff1sw_u64(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_s16() { + svsetffr(); + let _ = svld1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_s32() { + svsetffr(); + let _ = svld1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_vnum_s32() { + svsetffr(); + let _ = svld1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_s64() { + svsetffr(); + let _ = svld1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 
1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_vnum_s64() { + svsetffr(); + let _ = svld1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_vnum_s64() { + svsetffr(); + let _ = svld1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldff1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_u16() { + svsetffr(); + let _ = svld1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_u32() { + svsetffr(); + let _ = svld1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_vnum_u32() { + svsetffr(); + let _ = svld1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + 
svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_u64() { + svsetffr(); + let _ = svld1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_vnum_u64() { + svsetffr(); + let _ = svld1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_vnum_u64() { + svsetffr(); + let _ = svld1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldff1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_s16() { + svsetffr(); + let _ = svld1ub_s16(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_s16(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_s32() { + svsetffr(); + let _ = svld1ub_s32(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_s32(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_s32() { + svsetffr(); + let _ = svld1uh_s32(svptrue_b16(), 
U16_DATA.as_ptr()); + let loaded = svldff1uh_s32(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_s64() { + svsetffr(); + let _ = svld1ub_s64(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_s64(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_s64() { + svsetffr(); + let _ = svld1uh_s64(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldff1uh_s64(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_s64() { + svsetffr(); + let _ = svld1uw_s64(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldff1uw_s64(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_u16() { + svsetffr(); + let _ = svld1ub_u16(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_u16(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_u32() { + svsetffr(); + let _ = svld1ub_u32(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_u32(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_u32() { + svsetffr(); + let _ = svld1uh_u32(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldff1uh_u32(svptrue_b16(), U16_DATA.as_ptr()); + 
assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_u64() { + svsetffr(); + let _ = svld1ub_u64(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_u64(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_u64() { + svsetffr(); + let _ = svld1uh_u64(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldff1uh_u64(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_u64() { + svsetffr(); + let _ = svld1uw_u64(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldff1uw_u64(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_s16() { + svsetffr(); + let _ = svld1ub_vnum_s16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_s16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_s32() { + svsetffr(); + let _ = svld1ub_vnum_s32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_s32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_vnum_s32() { + svsetffr(); + let _ = svld1uh_vnum_s32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = 
svldff1uh_vnum_s32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_s64() { + svsetffr(); + let _ = svld1ub_vnum_s64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_s64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_vnum_s64() { + svsetffr(); + let _ = svld1uh_vnum_s64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldff1uh_vnum_s64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_vnum_s64() { + svsetffr(); + let _ = svld1uw_vnum_s64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldff1uw_vnum_s64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_u16() { + svsetffr(); + let _ = svld1ub_vnum_u16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_u16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_u32() { + svsetffr(); + let _ = svld1ub_vnum_u32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_u32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntw() as 
usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_vnum_u32() { + svsetffr(); + let _ = svld1uh_vnum_u32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldff1uh_vnum_u32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_u64() { + svsetffr(); + let _ = svld1ub_vnum_u64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_u64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_vnum_u64() { + svsetffr(); + let _ = svld1uh_vnum_u64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldff1uh_vnum_u64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_vnum_u64() { + svsetffr(); + let _ = svld1uw_vnum_u64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldff1uw_vnum_u64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_f32() { + svsetffr(); + let _ = svld1_f32(svptrue_b32(), F32_DATA.as_ptr()); + let loaded = svldnf1_f32(svptrue_b32(), F32_DATA.as_ptr()); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 
1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_f64() { + svsetffr(); + let _ = svld1_f64(svptrue_b64(), F64_DATA.as_ptr()); + let loaded = svldnf1_f64(svptrue_b64(), F64_DATA.as_ptr()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_s8() { + svsetffr(); + let _ = svld1_s8(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1_s8(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i8( + loaded, + svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_s16() { + svsetffr(); + let _ = svld1_s16(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldnf1_s16(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_s32() { + svsetffr(); + let _ = svld1_s32(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldnf1_s32(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_s64() { + svsetffr(); + let _ = svld1_s64(svptrue_b64(), I64_DATA.as_ptr()); + let loaded = svldnf1_s64(svptrue_b64(), I64_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_u8() { + svsetffr(); + let _ = svld1_u8(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1_u8(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u8( + loaded, + svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn 
test_svldnf1_u16() { + svsetffr(); + let _ = svld1_u16(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldnf1_u16(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_u32() { + svsetffr(); + let _ = svld1_u32(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldnf1_u32(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_u64() { + svsetffr(); + let _ = svld1_u64(svptrue_b64(), U64_DATA.as_ptr()); + let loaded = svldnf1_u64(svptrue_b64(), U64_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_f32() { + svsetffr(); + let _ = svld1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_f64() { + svsetffr(); + let _ = svld1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_s8() { + svsetffr(); + let _ = svld1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntb() as usize; + 
assert_vector_matches_i8( + loaded, + svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_s16() { + svsetffr(); + let _ = svld1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_s32() { + svsetffr(); + let _ = svld1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_s64() { + svsetffr(); + let _ = svld1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_u8() { + svsetffr(); + let _ = svld1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntb() as usize; + assert_vector_matches_u8( + loaded, + svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_u16() { + svsetffr(); + let _ = svld1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 
1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_u32() { + svsetffr(); + let _ = svld1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_u64() { + svsetffr(); + let _ = svld1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_s16() { + svsetffr(); + let _ = svld1sb_s16(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_s16(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_s32() { + svsetffr(); + let _ = svld1sb_s32(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_s32(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_s32() { + svsetffr(); + let _ = svld1sh_s32(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldnf1sh_s32(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_s64() { + svsetffr(); + let _ = svld1sb_s64(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_s64(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + 
svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_s64() { + svsetffr(); + let _ = svld1sh_s64(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldnf1sh_s64(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sw_s64() { + svsetffr(); + let _ = svld1sw_s64(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldnf1sw_s64(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_u16() { + svsetffr(); + let _ = svld1sb_u16(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_u16(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_u32() { + svsetffr(); + let _ = svld1sb_u32(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_u32(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_u32() { + svsetffr(); + let _ = svld1sh_u32(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldnf1sh_u32(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_u64() { + svsetffr(); + let _ = svld1sb_u64(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_u64(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} 
+#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_u64() { + svsetffr(); + let _ = svld1sh_u64(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldnf1sh_u64(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sw_u64() { + svsetffr(); + let _ = svld1sw_u64(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldnf1sw_u64(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_s16() { + svsetffr(); + let _ = svld1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_s32() { + svsetffr(); + let _ = svld1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_vnum_s32() { + svsetffr(); + let _ = svld1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldnf1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_s64() { + svsetffr(); + let _ = svld1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 
1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_vnum_s64() { + svsetffr(); + let _ = svld1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldnf1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sw_vnum_s64() { + svsetffr(); + let _ = svld1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldnf1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_u16() { + svsetffr(); + let _ = svld1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_u32() { + svsetffr(); + let _ = svld1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_vnum_u32() { + svsetffr(); + let _ = svld1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldnf1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + 
svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_u64() { + svsetffr(); + let _ = svld1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_vnum_u64() { + svsetffr(); + let _ = svld1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldnf1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sw_vnum_u64() { + svsetffr(); + let _ = svld1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldnf1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_s16() { + svsetffr(); + let _ = svld1ub_s16(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_s16(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_s32() { + svsetffr(); + let _ = svld1ub_s32(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_s32(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_s32() { + svsetffr(); + let _ = svld1uh_s32(svptrue_b16(), 
U16_DATA.as_ptr()); + let loaded = svldnf1uh_s32(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_s64() { + svsetffr(); + let _ = svld1ub_s64(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_s64(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_s64() { + svsetffr(); + let _ = svld1uh_s64(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldnf1uh_s64(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uw_s64() { + svsetffr(); + let _ = svld1uw_s64(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldnf1uw_s64(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_u16() { + svsetffr(); + let _ = svld1ub_u16(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_u16(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_u32() { + svsetffr(); + let _ = svld1ub_u32(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_u32(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_u32() { + svsetffr(); + let _ = svld1uh_u32(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldnf1uh_u32(svptrue_b16(), U16_DATA.as_ptr()); + 
assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_u64() { + svsetffr(); + let _ = svld1ub_u64(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_u64(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_u64() { + svsetffr(); + let _ = svld1uh_u64(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldnf1uh_u64(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uw_u64() { + svsetffr(); + let _ = svld1uw_u64(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldnf1uw_u64(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_s16() { + svsetffr(); + let _ = svld1ub_vnum_s16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_s16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_s32() { + svsetffr(); + let _ = svld1ub_vnum_s32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_s32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_vnum_s32() { + svsetffr(); + let _ = svld1uh_vnum_s32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = 
svldnf1uh_vnum_s32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_s64() { + svsetffr(); + let _ = svld1ub_vnum_s64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_s64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_vnum_s64() { + svsetffr(); + let _ = svld1uh_vnum_s64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldnf1uh_vnum_s64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uw_vnum_s64() { + svsetffr(); + let _ = svld1uw_vnum_s64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldnf1uw_vnum_s64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_u16() { + svsetffr(); + let _ = svld1ub_vnum_u16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_u16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_u32() { + svsetffr(); + let _ = svld1ub_vnum_u32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_u32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntw() as 
usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_vnum_u32() { + svsetffr(); + let _ = svld1uh_vnum_u32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldnf1uh_vnum_u32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_u64() { + svsetffr(); + let _ = svld1ub_vnum_u64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_u64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_vnum_u64() { + svsetffr(); + let _ = svld1uh_vnum_u64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldnf1uh_vnum_u64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uw_vnum_u64() { + svsetffr(); + let _ = svld1uw_vnum_u64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldnf1uw_vnum_u64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_f32_with_svstnt1_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + svstnt1_f32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, 
&val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svldnt1_f32(svptrue_b32(), storage.as_ptr() as *const f32); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_f64_with_svstnt1_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + svstnt1_f64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svldnt1_f64(svptrue_b64(), storage.as_ptr() as *const f64); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_s8_with_svstnt1_s8() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_s8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1_s8(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i8( + loaded, + svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_s16_with_svstnt1_s16() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_s16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1_s16(svptrue_b16(), storage.as_ptr() as *const 
i16); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_s32_with_svstnt1_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_s32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1_s32(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_s64_with_svstnt1_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_s64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svldnt1_s64(svptrue_b64(), storage.as_ptr() as *const i64); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_u8_with_svstnt1_u8() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_u8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svldnt1_u8(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u8( + loaded, + svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_u16_with_svstnt1_u16() { + let mut storage = [0 as u16; 640usize]; + let data = 
svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_u16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svldnt1_u16(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_u32_with_svstnt1_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_u32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svldnt1_u32(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_u64_with_svstnt1_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_u64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svldnt1_u64(svptrue_b64(), storage.as_ptr() as *const u64); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_f32_with_svstnt1_vnum_f32() { + let len = svcntw() as usize; + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); + svstnt1_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + 
assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svldnt1_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_f64_with_svstnt1_vnum_f64() { + let len = svcntd() as usize; + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); + svstnt1_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svldnt1_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_s8_with_svstnt1_vnum_s8() { + let len = svcntb() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i8( + loaded, + svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_s16_with_svstnt1_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + 
svstnt1_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_s32_with_svstnt1_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_s64_with_svstnt1_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svldnt1_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_u8_with_svstnt1_vnum_u8() { + let len = svcntb() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + 
svstnt1_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svldnt1_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u8( + loaded, + svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_u16_with_svstnt1_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svldnt1_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_u32_with_svstnt1_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svldnt1_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_u64_with_svstnt1_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_u64(svptrue_b64(), 
storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svldnt1_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb() { + svsetffr(); + let loaded = svprfb::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b8(), I64_DATA.as_ptr()); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh() { + svsetffr(); + let loaded = svprfh::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b16(), I64_DATA.as_ptr()); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw() { + svsetffr(); + let loaded = svprfw::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b32(), I64_DATA.as_ptr()); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd() { + svsetffr(); + let loaded = svprfd::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b64(), I64_DATA.as_ptr()); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_s32offset() { + let offsets = svindex_s32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfb_gather_s32offset::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + offsets, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_s32index() { + let indices = svindex_s32(0, 1); + svsetffr(); + let loaded = svprfh_gather_s32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_s32index() { + let indices = svindex_s32(0, 1); + svsetffr(); + let loaded = svprfw_gather_s32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_s32index() { + let indices = svindex_s32(0, 1); + svsetffr(); + let loaded = svprfd_gather_s32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + 
svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_s64offset() { + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfb_gather_s64offset::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + offsets, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_s64index() { + let indices = svindex_s64(0, 1); + svsetffr(); + let loaded = svprfh_gather_s64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_s64index() { + let indices = svindex_s64(0, 1); + svsetffr(); + let loaded = svprfw_gather_s64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_s64index() { + let indices = svindex_s64(0, 1); + svsetffr(); + let loaded = svprfd_gather_s64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_u32offset() { + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfb_gather_u32offset::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + offsets, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_u32index() { + let indices = svindex_u32(0, 1); + svsetffr(); + let loaded = svprfh_gather_u32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_u32index() { + let indices = svindex_u32(0, 1); + svsetffr(); + let loaded = svprfw_gather_u32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_u32index() { + let indices = svindex_u32(0, 1); + svsetffr(); + let 
loaded = svprfd_gather_u32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_u64offset() { + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfb_gather_u64offset::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + offsets, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_u64index() { + let indices = svindex_u64(0, 1); + svsetffr(); + let loaded = svprfh_gather_u64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_u64index() { + let indices = svindex_u64(0, 1); + svsetffr(); + let loaded = svprfw_gather_u64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_u64index() { + let indices = svindex_u64(0, 1); + svsetffr(); + let loaded = svprfd_gather_u64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_u64base() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfb_gather_u64base::<{ svprfop::SV_PLDL1KEEP }>(svptrue_b64(), bases); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_u64base() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfh_gather_u64base::<{ svprfop::SV_PLDL1KEEP }>(svptrue_b64(), bases); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_u64base() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = 
svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfw_gather_u64base::<{ svprfop::SV_PLDL1KEEP }>(svptrue_b64(), bases); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_u64base() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfd_gather_u64base::<{ svprfop::SV_PLDL1KEEP }>(svptrue_b64(), bases); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_u32base_offset() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfb_gather_u32base_offset::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b32(), + bases, + U32_DATA.as_ptr() as i64 + 4u32 as i64, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_u32base_index() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfh_gather_u32base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b32(), + bases, + U32_DATA.as_ptr() as i64 / (4u32 as i64) + 1, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_u32base_index() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfw_gather_u32base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b32(), + bases, + U32_DATA.as_ptr() as i64 / (4u32 as i64) + 1, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_u32base_index() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfd_gather_u32base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b32(), + bases, + U32_DATA.as_ptr() as i64 / (4u32 as i64) + 1, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_u64base_offset() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, 
offsets); + svsetffr(); + let loaded = svprfb_gather_u64base_offset::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b64(), + bases, + 8u32.try_into().unwrap(), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_u64base_index() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfh_gather_u64base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b64(), + bases, + 1.try_into().unwrap(), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_u64base_index() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfw_gather_u64base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b64(), + bases, + 1.try_into().unwrap(), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_u64base_index() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfd_gather_u64base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b64(), + bases, + 1.try_into().unwrap(), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_vnum() { + svsetffr(); + let loaded = svprfb_vnum::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b8(), I64_DATA.as_ptr(), 1); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_vnum() { + svsetffr(); + let loaded = svprfh_vnum::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b16(), I64_DATA.as_ptr(), 1); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_vnum() { + svsetffr(); + let loaded = svprfw_vnum::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b32(), I64_DATA.as_ptr(), 1); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_vnum() { + svsetffr(); + let loaded = svprfd_vnum::<{ 
svprfop::SV_PLDL1KEEP }, i64>(svptrue_b64(), I64_DATA.as_ptr(), 1); +} +#[simd_test(enable = "sve")] +unsafe fn test_ffr() { + svsetffr(); + let ffr = svrdffr(); + assert_vector_matches_u8(svdup_n_u8_z(ffr, 1), svindex_u8(1, 0)); + let pred = svdupq_n_b8( + true, false, true, false, true, false, true, false, true, false, true, false, true, false, + true, false, + ); + svwrffr(pred); + let ffr = svrdffr_z(svptrue_b8()); + assert_vector_matches_u8(svdup_n_u8_z(ffr, 1), svdup_n_u8_z(pred, 1)); +} diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs index 8b137891791fe..79be8a88890c7 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs @@ -1 +1,23857 @@ +// This code is automatically generated. DO NOT MODIFY. +// +// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file: +// +// ``` +// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec +// ``` +#![allow(improper_ctypes)] +#[cfg(test)] +use stdarch_test::assert_instr; + +use super::*; + +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv16i8")] + fn _svaba_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svaba_s8(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s8])"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svaba_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv8i16")] + fn _svaba_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svaba_s16(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svaba_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv4i32")] + fn _svaba_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svaba_s32(op1, op2, 
op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svaba_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv2i64")] + fn _svaba_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svaba_s64(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svaba_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv16i8")] + fn _svaba_u8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svaba_u8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svaba_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv8i16")] + fn _svaba_u16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svaba_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svaba_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u32])"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv4i32")] + fn _svaba_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svaba_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svaba_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv2i64")] + fn _svaba_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svaba_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(uaba))] +pub fn svaba_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svaba_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv8i16")] + fn _svabalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalb_s16(op1, op2, op3) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svabalb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv4i32")] + fn _svabalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalb_s32(op1, op2, op3) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svabalb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv2i64")] + fn _svabalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalb_s64(op1, op2, op3) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svabalb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.uabalb.nxv8i16")] + fn _svabalb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svabalb_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv4i32")] + fn _svabalb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svabalb_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u64])"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv2i64")] + fn _svabalb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svabalb_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv8i16")] + fn _svabalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalt_s16(op1, op2, op3) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn 
svabalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svabalt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv4i32")] + fn _svabalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalt_s32(op1, op2, op3) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svabalt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv2i64")] + fn _svabalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalt_s64(op1, op2, op3) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svabalt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv8i16")] + fn _svabalt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svabalt_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv4i32")] + fn _svabalt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svabalt_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv2i64")] + fn _svabalt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svabalt_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv8i16")] + fn _svabdlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlb_s16(op1, op2) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svabdlb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv4i32")] + fn _svabdlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlb_s32(op1, op2) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + 
svabdlb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv2i64")] + fn _svabdlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlb_s64(op1, op2) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svabdlb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv8i16")] + fn _svabdlb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svabdlb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv4i32")] + fn _svabdlb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svabdlb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv2i64")] + fn _svabdlb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svabdlb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv8i16")] + fn _svabdlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlt_s16(op1, op2) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svabdlt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv4i32")] + fn _svabdlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { 
_svabdlt_s32(op1, op2) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svabdlt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv2i64")] + fn _svabdlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlt_s64(op1, op2) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svabdlt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.uabdlt.nxv8i16")] + fn _svabdlt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svabdlt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv4i32")] + fn _svabdlt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svabdlt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(uabdlt))] +pub fn svabdlt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv2i64")] + fn _svabdlt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svabdlt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv8i16")] + fn _svadalp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svadalp_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { + svadalp_s16_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s16_z(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { + svadalp_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv4i32")] + fn _svadalp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svadalp_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t { + svadalp_s32_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s32_z(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t { + svadalp_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} 
+#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv2i64")] + fn _svadalp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svadalp_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t { + svadalp_s64_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s64_z(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t { + svadalp_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t 
{ + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv8i16")] + fn _svadalp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svadalp_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + svadalp_u16_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + svadalp_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv4i32")] + fn _svadalp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svadalp_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + svadalp_u32_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + svadalp_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv2i64")] + fn _svadalp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svadalp_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + 
svadalp_u64_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + svadalp_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Add with carry long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclb.nxv4i32")] + fn _svadclb_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svadclb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svadclb_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Add with carry long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn 
svadclb_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclb.nxv2i64")] + fn _svadclb_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svadclb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svadclb_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Add with carry long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclt.nxv4i32")] + fn _svadclt_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svadclt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svadclt_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Add with carry long 
(top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclt.nxv2i64")] + fn _svadclt_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svadclt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svadclt_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv8i16")] + fn _svaddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svaddhnb_s16(op1, op2) } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svaddhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv4i32")] + fn _svaddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svaddhnb_s32(op1, op2) } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svaddhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv2i64")] + fn _svaddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svaddhnb_s64(op1, op2) } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s64])"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svaddhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svaddhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svaddhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svaddhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_u32(op1: 
svuint32_t, op2: u32) -> svuint16_t { + svaddhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svaddhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svaddhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv8i16")] + fn _svaddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svaddhnt_s16(even, op1, op2) } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_s16(even: svint8_t, 
op1: svint16_t, op2: i16) -> svint8_t { + svaddhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv4i32")] + fn _svaddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svaddhnt_s32(even, op1, op2) } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svaddhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv2i64")] + fn _svaddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svaddhnt_s64(even, op1, op2) } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svaddhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svaddhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svaddhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svaddhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svaddhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svaddhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svaddhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv8i16")] + fn _svaddlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlb_s16(op1, op2) } +} +#[doc = "Add long (bottom)"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svaddlb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv4i32")] + fn _svaddlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlb_s32(op1, op2) } +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svaddlb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv2i64")] + fn _svaddlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlb_s64(op1, op2) } +} 
+#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svaddlb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv8i16")] + fn _svaddlb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svaddlb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv4i32")] + fn _svaddlb_u32(op1: 
svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svaddlb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv2i64")] + fn _svaddlb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svaddlb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe 
extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.saddlbt.nxv8i16" + )] + fn _svaddlbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlbt_s16(op1, op2) } +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svaddlbt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.saddlbt.nxv4i32" + )] + fn _svaddlbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlbt_s32(op1, op2) } +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svaddlbt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(saddlbt))] +pub fn svaddlbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.saddlbt.nxv2i64" + )] + fn _svaddlbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlbt_s64(op1, op2) } +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svaddlbt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv8i16")] + fn _svaddlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlt_s16(op1, op2) } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svaddlt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv4i32")] + fn _svaddlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlt_s32(op1, op2) } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svaddlt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv2i64")] + fn _svaddlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlt_s64(op1, op2) } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svaddlt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u16])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv8i16")] + fn _svaddlt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svaddlt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv4i32")] + fn _svaddlt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svaddlt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv2i64")] + fn _svaddlt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svaddlt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddp.nxv4f32")] + fn _svaddp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svaddp_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f32_x(pg: 
svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svaddp_f32_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddp.nxv2f64")] + fn _svaddp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svaddp_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svaddp_f64_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv16i8")] + fn _svaddp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svaddp_s8_m(pg, op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svaddp_s8_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv8i16")] + fn _svaddp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svaddp_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svaddp_s16_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv4i32")] + fn _svaddp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svaddp_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svaddp_s32_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv2i64")] + fn _svaddp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svaddp_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svaddp_s64_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svaddp_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svaddp_u8_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svaddp_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svaddp_u16_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svaddp_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svaddp_u32_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svaddp_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svaddp_u64_m(pg, op1, op2) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv8i16")] + fn _svaddwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwb_s16(op1, op2) } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_n_s16(op1: 
svint16_t, op2: i8) -> svint16_t { + svaddwb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv4i32")] + fn _svaddwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwb_s32(op1, op2) } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svaddwb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv2i64")] + fn _svaddwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwb_s64(op1, op2) } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(saddwb))] +pub fn svaddwb_n_s64(op1: svint64_t, op2: i32) -> svint64_t { + svaddwb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv8i16")] + fn _svaddwb_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svaddwb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv4i32")] + fn _svaddwb_u32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u32])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { + svaddwb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv2i64")] + fn _svaddwb_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svaddwb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv8i16")] + fn _svaddwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwt_s16(op1, op2) } +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + svaddwt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv4i32")] + fn _svaddwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwt_s32(op1, op2) } +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svaddwt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv2i64")] + fn _svaddwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwt_s64(op1, op2) } +} +#[doc = "Add wide 
(top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_n_s64(op1: svint64_t, op2: i32) -> svint64_t { + svaddwt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv8i16")] + fn _svaddwt_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svaddwt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv4i32")] + fn _svaddwt_u32(op1: svint32_t, op2: svint16_t) -> 
svint32_t; + } + unsafe { _svaddwt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { + svaddwt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv2i64")] + fn _svaddwt_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svaddwt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "AES single round decryption"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesd[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(aesd))] +pub fn svaesd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesd")] + fn _svaesd_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svaesd_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "AES single round encryption"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaese[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(aese))] +pub fn svaese_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aese")] + fn _svaese_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svaese_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "AES inverse mix columns"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesimc[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(aesimc))] +pub fn svaesimc_u8(op: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesimc")] + fn _svaesimc_u8(op: svint8_t) -> svint8_t; + } + unsafe { _svaesimc_u8(op.as_signed()).as_unsigned() } +} +#[doc = "AES mix columns"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesmc[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(aesmc))] +pub fn svaesmc_u8(op: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesmc")] + fn _svaesmc_u8(op: svint8_t) -> svint8_t; + } + unsafe { 
_svaesmc_u8(op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv16i8")] + fn _svbcax_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svbcax_s8(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svbcax_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv8i16")] + fn _svbcax_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svbcax_s16(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svbcax_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv4i32")] + fn _svbcax_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svbcax_s32(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svbcax_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv2i64")] + fn _svbcax_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svbcax_s64(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svbcax_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svbcax_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svbcax_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svbcax_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u16])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svbcax_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svbcax_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svbcax_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svbcax_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svbcax_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv16i8")] + fn _svbdep_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svbdep_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svbdep_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv8i16")] + fn _svbdep_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svbdep_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter 
lower bits into positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bdep))]
// `_n_` form: broadcast the scalar `op2` to a vector and defer to the vector intrinsic.
pub fn svbdep_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t {
    svbdep_u16(op1, svdup_n_u16(op2))
}
#[doc = "Scatter lower bits into positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bdep))]
pub fn svbdep_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv4i32")]
        fn _svbdep_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: value-only intrinsic on its vector arguments; the features it
    // requires are guaranteed by #[target_feature] above.
    unsafe { _svbdep_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Scatter lower bits into positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bdep))]
// `_n_` form: broadcast the scalar `op2` to a vector and defer to the vector intrinsic.
pub fn svbdep_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
    svbdep_u32(op1, svdup_n_u32(op2))
}
#[doc = "Scatter lower bits into positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bdep))]
pub fn svbdep_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv2i64")]
        fn _svbdep_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbdep_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Scatter lower bits into positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bdep))]
// `_n_` form: broadcast the scalar `op2` to a vector and defer to the vector intrinsic.
pub fn svbdep_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
    svbdep_u64(op1, svdup_n_u64(op2))
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
pub fn svbext_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv16i8")]
        fn _svbext_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbext_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
// `_n_` form: broadcast the scalar `op2` to a vector and defer to the vector intrinsic.
pub fn svbext_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
    svbext_u8(op1, svdup_n_u8(op2))
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
pub fn svbext_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv8i16")]
        fn _svbext_u16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbext_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
// `_n_` form: broadcast the scalar `op2` to a vector and defer to the vector intrinsic.
pub fn svbext_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t {
    svbext_u16(op1, svdup_n_u16(op2))
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
pub fn svbext_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv4i32")]
        fn _svbext_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbext_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
// `_n_` form: broadcast the scalar `op2` to a vector and defer to the vector intrinsic.
pub fn svbext_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
    svbext_u32(op1, svdup_n_u32(op2))
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
pub fn svbext_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv2i64")]
        fn _svbext_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbext_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
// `_n_` form: broadcast the scalar `op2` to a vector and defer to the vector intrinsic.
pub fn svbext_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
    svbext_u64(op1, svdup_n_u64(op2))
}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
pub fn svbgrp_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv16i8")]
        fn _svbgrp_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbgrp_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
// `_n_` form: broadcast the scalar `op2` to a vector and defer to the vector intrinsic.
pub fn svbgrp_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
    svbgrp_u8(op1, svdup_n_u8(op2))
}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
pub fn svbgrp_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv8i16")]
        fn _svbgrp_u16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbgrp_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
// `_n_` form: broadcast the scalar `op2` to a vector and defer to the vector intrinsic.
pub fn svbgrp_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t {
    svbgrp_u16(op1, svdup_n_u16(op2))
}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
pub fn svbgrp_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv4i32")]
        fn _svbgrp_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbgrp_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
// `_n_` form: broadcast the scalar `op2` to a vector and defer to the vector intrinsic.
pub fn svbgrp_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
    svbgrp_u32(op1, svdup_n_u32(op2))
}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
pub fn svbgrp_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv2i64")]
        fn _svbgrp_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbgrp_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
// `_n_` form: broadcast the scalar `op2` to a vector and defer to the vector intrinsic.
pub fn svbgrp_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
    svbgrp_u64(op1, svdup_n_u64(op2))
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv16i8")]
        fn _svbsl1n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbsl1n_s8(op1, op2, op3) }
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl1n_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    svbsl1n_s8(op1, op2, svdup_n_s8(op3))
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv8i16")]
        fn _svbsl1n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbsl1n_s16(op1, op2, op3) }
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl1n_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    svbsl1n_s16(op1, op2, svdup_n_s16(op3))
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv4i32")]
        fn _svbsl1n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbsl1n_s32(op1, op2, op3) }
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl1n_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    svbsl1n_s32(op1, op2, svdup_n_s32(op3))
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv2i64")]
        fn _svbsl1n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbsl1n_s64(op1, op2, op3) }
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl1n_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    svbsl1n_s64(op1, op2, svdup_n_s64(op3))
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    // SAFETY: bit-for-bit reinterpretation between same-width unsigned and
    // signed vectors around the signed implementation.
    unsafe { svbsl1n_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl1n_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    svbsl1n_u8(op1, op2, svdup_n_u8(op3))
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    // SAFETY: bit-for-bit signed/unsigned reinterpretation around the signed impl.
    unsafe { svbsl1n_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl1n_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    svbsl1n_u16(op1, op2, svdup_n_u16(op3))
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    // SAFETY: bit-for-bit signed/unsigned reinterpretation around the signed impl.
    unsafe { svbsl1n_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl1n_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    svbsl1n_u32(op1, op2, svdup_n_u32(op3))
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    // SAFETY: bit-for-bit signed/unsigned reinterpretation around the signed impl.
    unsafe { svbsl1n_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl1n_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    svbsl1n_u64(op1, op2, svdup_n_u64(op3))
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv16i8")]
        fn _svbsl2n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbsl2n_s8(op1, op2, op3) }
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl2n_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    svbsl2n_s8(op1, op2, svdup_n_s8(op3))
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv8i16")]
        fn _svbsl2n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbsl2n_s16(op1, op2, op3) }
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl2n_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    svbsl2n_s16(op1, op2, svdup_n_s16(op3))
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv4i32")]
        fn _svbsl2n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbsl2n_s32(op1, op2, op3) }
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl2n_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    svbsl2n_s32(op1, op2, svdup_n_s32(op3))
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv2i64")]
        fn _svbsl2n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbsl2n_s64(op1, op2, op3) }
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl2n_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    svbsl2n_s64(op1, op2, svdup_n_s64(op3))
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    // SAFETY: bit-for-bit signed/unsigned reinterpretation around the signed impl.
    unsafe { svbsl2n_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl2n_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    svbsl2n_u8(op1, op2, svdup_n_u8(op3))
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    // SAFETY: bit-for-bit signed/unsigned reinterpretation around the signed impl.
    unsafe { svbsl2n_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl2n_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    svbsl2n_u16(op1, op2, svdup_n_u16(op3))
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    // SAFETY: bit-for-bit signed/unsigned reinterpretation around the signed impl.
    unsafe { svbsl2n_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl2n_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    svbsl2n_u32(op1, op2, svdup_n_u32(op3))
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    // SAFETY: bit-for-bit signed/unsigned reinterpretation around the signed impl.
    unsafe { svbsl2n_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl2n_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    svbsl2n_u64(op1, op2, svdup_n_u64(op3))
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
pub fn svbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv16i8")]
        fn _svbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbsl_s8(op1, op2, op3) }
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    svbsl_s8(op1, op2, svdup_n_s8(op3))
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
pub fn svbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv8i16")]
        fn _svbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbsl_s16(op1, op2, op3) }
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    svbsl_s16(op1, op2, svdup_n_s16(op3))
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
pub fn svbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv4i32")]
        fn _svbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbsl_s32(op1, op2, op3) }
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    svbsl_s32(op1, op2, svdup_n_s32(op3))
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
pub fn svbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv2i64")]
        fn _svbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    // SAFETY: value-only intrinsic; required features enabled above.
    unsafe { _svbsl_s64(op1, op2, op3) }
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    svbsl_s64(op1, op2, svdup_n_s64(op3))
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
pub fn svbsl_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    // SAFETY: bit-for-bit signed/unsigned reinterpretation around the signed impl.
    unsafe { svbsl_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
// `_n_` form: broadcast the scalar `op3` to a vector and defer to the vector intrinsic.
pub fn svbsl_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    svbsl_u8(op1, op2, svdup_n_u8(op3))
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
pub fn svbsl_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    unsafe { svbsl_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
pub fn svbsl_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    svbsl_u16(op1, op2, svdup_n_u16(op3))
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
pub fn svbsl_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    unsafe { svbsl_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
pub fn svbsl_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    svbsl_u32(op1, op2, svdup_n_u32(op3))
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
pub fn svbsl_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    unsafe { svbsl_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
pub fn svbsl_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    svbsl_u64(op1, op2, svdup_n_u64(op3))
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_s8<const IMM_ROTATION: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Hardware only encodes 90- and 270-degree rotations.
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv16i8")]
        fn _svcadd_s8(op1: svint8_t, op2: svint8_t, imm_rotation: i32) -> svint8_t;
    }
    unsafe { _svcadd_s8(op1, op2, IMM_ROTATION) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_s16<const IMM_ROTATION: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv8i16")]
        fn _svcadd_s16(op1: svint16_t, op2: svint16_t, imm_rotation: i32) -> svint16_t;
    }
    unsafe { _svcadd_s16(op1, op2, IMM_ROTATION) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_s32<const IMM_ROTATION: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv4i32")]
        fn _svcadd_s32(op1: svint32_t, op2: svint32_t, imm_rotation: i32) -> svint32_t;
    }
    unsafe { _svcadd_s32(op1, op2, IMM_ROTATION) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_s64<const IMM_ROTATION: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv2i64")]
        fn _svcadd_s64(op1: svint64_t, op2: svint64_t, imm_rotation: i32) -> svint64_t;
    }
    unsafe { _svcadd_s64(op1, op2, IMM_ROTATION) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_u8<const IMM_ROTATION: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe { svcadd_s8::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_u16<const IMM_ROTATION: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe { svcadd_s16::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_u32<const IMM_ROTATION: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe { svcadd_s32::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_u64<const IMM_ROTATION: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe { svcadd_s64::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Complex dot product"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cdot, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svcdot_lane_s32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svint32_t,
    op2: svint8_t,
    op3: svint8_t,
) -> svint32_t {
    // Four 32-bit lanes per 128-bit segment, hence an index range of 0..=3.
    static_assert_range!(IMM_INDEX, 0..=3);
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cdot.lane.nxv4i32"
        )]
        fn _svcdot_lane_s32(
            op1: svint32_t,
            op2: svint8_t,
            op3: svint8_t,
            imm_index: i32,
            imm_rotation: i32,
        ) -> svint32_t;
    }
    unsafe { _svcdot_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
}
#[doc = "Complex dot product"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cdot, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svcdot_lane_s64<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svint64_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint64_t {
    // Two 64-bit lanes per 128-bit segment, hence an index range of 0..=1.
    static_assert_range!(IMM_INDEX, 0..=1);
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cdot.lane.nxv2i64"
        )]
        fn _svcdot_lane_s64(
            op1: svint64_t,
            op2: svint16_t,
            op3: svint16_t,
            imm_index: i32,
            imm_rotation: i32,
        ) -> svint64_t;
    }
    unsafe { _svcdot_lane_s64(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
}
#[doc = "Complex dot product"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cdot, IMM_ROTATION = 90))]
pub fn svcdot_s32<const IMM_ROTATION: i32>(
    op1: svint32_t,
    op2: svint8_t,
    op3: svint8_t,
) -> svint32_t {
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cdot.nxv4i32")]
        fn _svcdot_s32(
            op1: svint32_t,
            op2: svint8_t,
            op3: svint8_t,
            imm_rotation: i32,
        ) -> svint32_t;
    }
    unsafe { _svcdot_s32(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Complex dot product"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cdot, IMM_ROTATION = 90))]
pub fn svcdot_s64<const IMM_ROTATION: i32>(
    op1: svint64_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint64_t {
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cdot.nxv2i64")]
        fn _svcdot_s64(
            op1: svint64_t,
            op2: svint16_t,
            op3: svint16_t,
            imm_rotation: i32,
        ) -> svint64_t;
    }
    unsafe { _svcdot_s64(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svcmla_lane_s16<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svint16_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint16_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmla.lane.x.nxv8i16"
        )]
        fn _svcmla_lane_s16(
            op1: svint16_t,
            op2: svint16_t,
            op3: svint16_t,
            imm_index: i32,
            imm_rotation: i32,
        ) -> svint16_t;
    }
    unsafe { _svcmla_lane_s16(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svcmla_lane_s32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svint32_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint32_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmla.lane.x.nxv4i32"
        )]
        fn _svcmla_lane_s32(
            op1: svint32_t,
            op2: svint32_t,
            op3: svint32_t,
            imm_index: i32,
            imm_rotation: i32,
        ) -> svint32_t;
    }
    unsafe { _svcmla_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svcmla_lane_u16<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svuint16_t,
    op2: svuint16_t,
    op3: svuint16_t,
) -> svuint16_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe {
        svcmla_lane_s16::<IMM_INDEX, IMM_ROTATION>(
            op1.as_signed(),
            op2.as_signed(),
            op3.as_signed(),
        )
        .as_unsigned()
    }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svcmla_lane_u32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svuint32_t,
    op2: svuint32_t,
    op3: svuint32_t,
) -> svuint32_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe {
        svcmla_lane_s32::<IMM_INDEX, IMM_ROTATION>(
            op1.as_signed(),
            op2.as_signed(),
            op3.as_signed(),
        )
        .as_unsigned()
    }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
pub fn svcmla_s8<const IMM_ROTATION: i32>(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv16i8")]
        fn _svcmla_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t, imm_rotation: i32) -> svint8_t;
    }
    unsafe { _svcmla_s8(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
pub fn svcmla_s16<const IMM_ROTATION: i32>(
    op1: svint16_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint16_t {
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv8i16")]
        fn _svcmla_s16(
            op1: svint16_t,
            op2: svint16_t,
            op3: svint16_t,
            imm_rotation: i32,
        ) -> svint16_t;
    }
    unsafe { _svcmla_s16(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
pub fn svcmla_s32<const IMM_ROTATION: i32>(
    op1: svint32_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint32_t {
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv4i32")]
        fn _svcmla_s32(
            op1: svint32_t,
            op2: svint32_t,
            op3: svint32_t,
            imm_rotation: i32,
        ) -> svint32_t;
    }
    unsafe { _svcmla_s32(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
pub fn svcmla_s64<const IMM_ROTATION: i32>(
    op1: svint64_t,
    op2: svint64_t,
    op3: svint64_t,
) -> svint64_t {
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv2i64")]
        fn _svcmla_s64(
            op1: svint64_t,
            op2: svint64_t,
            op3: svint64_t,
            imm_rotation: i32,
        ) -> svint64_t;
    }
    unsafe { _svcmla_s64(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
pub fn svcmla_u8<const IMM_ROTATION: i32>(
    op1: svuint8_t,
    op2: svuint8_t,
    op3: svuint8_t,
) -> svuint8_t {
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe {
        svcmla_s8::<IMM_ROTATION>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
    }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
pub fn svcmla_u16<const IMM_ROTATION: i32>(
    op1: svuint16_t,
    op2: svuint16_t,
    op3: svuint16_t,
) -> svuint16_t {
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe {
        svcmla_s16::<IMM_ROTATION>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
    }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
pub fn svcmla_u32<const IMM_ROTATION: i32>(
    op1: svuint32_t,
    op2: svuint32_t,
    op3: svuint32_t,
) -> svuint32_t {
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe {
        svcmla_s32::<IMM_ROTATION>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
    }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
pub fn svcmla_u64<const IMM_ROTATION: i32>(
    op1: svuint64_t,
    op2: svuint64_t,
    op3: svuint64_t,
) -> svuint64_t {
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe {
        svcmla_s64::<IMM_ROTATION>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
    }
}
#[doc = "Up convert long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtlt_f64[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtlt))]
pub fn svcvtlt_f64_f32_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat32_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtlt.f64f32")]
        fn _svcvtlt_f64_f32_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat32_t)
            -> svfloat64_t;
    }
    unsafe { _svcvtlt_f64_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Up convert long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtlt_f64[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtlt))]
pub fn svcvtlt_f64_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat64_t {
    // "_x" form: inactive lanes are don't-care, so reuse `op`'s bits as the
    // merge source instead of materialising a separate vector.
    unsafe { svcvtlt_f64_f32_m(crate::intrinsics::transmute_unchecked(op), pg, op) }
}
#[doc = "Down convert and narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtnt_f32[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtnt))]
pub fn svcvtnt_f32_f64_m(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtnt.f32f64")]
        fn _svcvtnt_f32_f64_m(even: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t;
    }
    unsafe { _svcvtnt_f32_f64_m(even, pg.sve_into(), op) }
}
#[doc = "Down convert and narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtnt_f32[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtnt))]
pub fn svcvtnt_f32_f64_x(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
    svcvtnt_f32_f64_m(even, pg, op)
}
#[doc = "Down convert, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtx))]
pub fn svcvtx_f32_f64_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtx.f32f64")]
        fn _svcvtx_f32_f64_m(inactive: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t;
    }
    unsafe { _svcvtx_f32_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Down convert, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtx))]
pub fn svcvtx_f32_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
    unsafe { svcvtx_f32_f64_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Down convert, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtx))]
pub fn svcvtx_f32_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
    // "_z" form: inactive lanes are zeroed.
    svcvtx_f32_f64_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Down convert, rounding to odd (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtxnt_f32[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtxnt))]
pub fn svcvtxnt_f32_f64_m(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtxnt.f32f64")]
        fn _svcvtxnt_f32_f64_m(even: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t;
    }
    unsafe { _svcvtxnt_f32_f64_m(even, pg.sve_into(), op) }
}
#[doc = "Down convert, rounding to odd (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtxnt_f32[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtxnt))]
pub fn svcvtxnt_f32_f64_x(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
    svcvtxnt_f32_f64_m(even, pg, op)
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv16i8")]
        fn _sveor3_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    unsafe { _sveor3_s8(op1, op2, op3) }
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    sveor3_s8(op1, op2, svdup_n_s8(op3))
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv8i16")]
        fn _sveor3_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    unsafe { _sveor3_s16(op1, op2, op3) }
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    sveor3_s16(op1, op2, svdup_n_s16(op3))
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv4i32")]
        fn _sveor3_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    unsafe { _sveor3_s32(op1, op2, op3) }
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    sveor3_s32(op1, op2, svdup_n_s32(op3))
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv2i64")]
        fn _sveor3_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    unsafe { _sveor3_s64(op1, op2, op3) }
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    sveor3_s64(op1, op2, svdup_n_s64(op3))
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    unsafe { sveor3_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    sveor3_u8(op1, op2, svdup_n_u8(op3))
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    unsafe { sveor3_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    sveor3_u16(op1, op2, svdup_n_u16(op3))
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    unsafe { sveor3_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    sveor3_u32(op1, op2, svdup_n_u32(op3))
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    unsafe { sveor3_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    sveor3_u64(op1, op2, svdup_n_u64(op3))
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_s8(odd: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv16i8")]
        fn _sveorbt_s8(odd: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _sveorbt_s8(odd, op1, op2) }
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_n_s8(odd: svint8_t, op1: svint8_t, op2: i8) -> svint8_t {
    sveorbt_s8(odd, op1, svdup_n_s8(op2))
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_s16(odd: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv8i16")]
        fn _sveorbt_s16(odd: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _sveorbt_s16(odd, op1, op2) }
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_n_s16(odd: svint16_t, op1: svint16_t, op2: i16) -> svint16_t {
    sveorbt_s16(odd, op1, svdup_n_s16(op2))
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_s32(odd: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv4i32")]
        fn _sveorbt_s32(odd: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _sveorbt_s32(odd, op1, op2) }
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_n_s32(odd: svint32_t, op1: svint32_t, op2: i32) -> svint32_t {
    sveorbt_s32(odd, op1, svdup_n_s32(op2))
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_s64(odd: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv2i64")]
        fn _sveorbt_s64(odd: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _sveorbt_s64(odd, op1, op2) }
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_n_s64(odd: svint64_t, op1: svint64_t, op2: i64) -> svint64_t {
    sveorbt_s64(odd, op1, svdup_n_s64(op2))
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_u8(odd: svuint8_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe { sveorbt_s8(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_n_u8(odd: svuint8_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    sveorbt_u8(odd, op1, svdup_n_u8(op2))
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_u16(odd: svuint16_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe { sveorbt_s16(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_n_u16(odd: svuint16_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    sveorbt_u16(odd, op1, svdup_n_u16(op2))
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_u32(odd: svuint32_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe { sveorbt_s32(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_n_u32(odd: svuint32_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    sveorbt_u32(odd, op1, svdup_n_u32(op2))
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_u64(odd: svuint64_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe { sveorbt_s64(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_u64(odd: svuint64_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveorbt_u64(odd, op1, svdup_n_u64(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s8(even: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv16i8")] + fn _sveortb_s8(even: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _sveortb_s8(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s8(even: svint8_t, op1: svint8_t, op2: i8) -> svint8_t { + sveortb_s8(even, op1, svdup_n_s8(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s16(even: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.eortb.nxv8i16")] + fn _sveortb_s16(even: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _sveortb_s16(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s16(even: svint16_t, op1: svint16_t, op2: i16) -> svint16_t { + sveortb_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s32(even: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv4i32")] + fn _sveortb_s32(even: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _sveortb_s32(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s32(even: svint32_t, op1: svint32_t, op2: i32) -> svint32_t { + sveortb_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s64(even: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv2i64")] + fn _sveortb_s64(even: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _sveortb_s64(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s64(even: svint64_t, op1: svint64_t, op2: i64) -> svint64_t { + sveortb_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u8(even: svuint8_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { sveortb_s8(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u8(even: svuint8_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveortb_u8(even, op1, svdup_n_u8(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u16(even: svuint16_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { sveortb_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u16(even: svuint16_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveortb_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u32(even: svuint32_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { sveortb_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u32(even: svuint32_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveortb_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u64(even: svuint64_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { sveortb_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u64(even: svuint64_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveortb_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv16i8")] + fn _svhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhadd_s8_m(pg, op1, op2) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhadd_s8_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s16_m(pg: svbool_t, 
// NOTE(review): diff fragment of generated SVE2 bindings; code left byte-identical.
// svhadd s16/s32/s64: the LLVM shim declares a width-specific predicate type
// (svbool8_t / svbool4_t / svbool2_t) and the wrapper converts the generic svbool_t
// predicate with `pg.sve_into()` before the call — unlike the s8 form, which passes
// svbool_t through unchanged. _x forms delegate to _m; _z forms zero inactive lanes
// of op1 via `svsel_*` first; `_n_` forms splat the scalar with svdup_n_*.
op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv8i16")] + fn _svhadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhadd_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhadd_s16_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhadd_s16_m(pg, 
svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv4i32")] + fn _svhadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhadd_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhadd_s32_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv2i64")] + fn _svhadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhadd_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving add"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhadd_s64_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub 
// NOTE(review): diff fragment of generated SVE2 bindings; code left byte-identical.
// Unsigned halving add (svhadd_u*, instruction `uhadd`): wrappers reinterpret operands
// with as_signed() and the result with as_unsigned() around the LLVM `uhadd` shim; the
// u8 form passes svbool_t directly while u16/u32/u64 convert the predicate with
// `pg.sve_into()` to svbool8_t/svbool4_t/svbool2_t. _x delegates to _m; _z zeroes
// inactive lanes of op1 via svsel_u* first; `_n_` forms splat the scalar with svdup_n_u*.
fn svhadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv16i8")] + fn _svhadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhadd_u8_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t 
{ + svhadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv8i16")] + fn _svhadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhadd_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + 
svhadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhadd_u16_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv4i32")] + fn _svhadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhadd_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhadd_u32_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv2i64")] + fn _svhadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhadd_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhadd_u64_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Count matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + 
link_name = "llvm.aarch64.sve.histcnt.nxv4i32" + )] + fn _svhistcnt_s32_z(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhistcnt_s32_z(pg.sve_into(), op1, op2).as_unsigned() } +} +#[doc = "Count matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.histcnt.nxv2i64" + )] + fn _svhistcnt_s64_z(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhistcnt_s64_z(pg.sve_into(), op1, op2).as_unsigned() } +} +#[doc = "Count matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svhistcnt_s32_z(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Count matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svhistcnt_s64_z(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Count matching elements in 128-bit segments"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistseg[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histseg))] +pub fn svhistseg_s8(op1: svint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.histseg.nxv16i8" + )] + fn _svhistseg_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhistseg_s8(op1, op2).as_unsigned() } +} +#[doc = "Count matching elements in 128-bit segments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistseg[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histseg))] +pub fn svhistseg_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svhistseg_s8(op1.as_signed(), op2.as_signed()) } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv16i8")] + fn _svhsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsub_s8_m(pg, op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn 
svhsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsub_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsub_s8_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsub_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsub_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_m)"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv8i16")] + fn _svhsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsub_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsub_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsub_s16_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsub_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsub_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv4i32")] + fn _svhsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsub_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsub_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsub_s32_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsub_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsub_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.shsub.nxv2i64")] + fn _svhsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsub_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsub_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsub_s64_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsub_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsub_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv16i8")] + fn _svhsub_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsub_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsub_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsub_u8_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsub_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsub_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv8i16")] + fn _svhsub_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsub_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsub_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsub_u16_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsub_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsub_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv4i32")] + fn _svhsub_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsub_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsub_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsub_u32_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsub_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsub_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv2i64")] + fn _svhsub_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsub_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsub_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsub_u64_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsub_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsub_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract 
reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv16i8")] + fn _svhsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsubr_s8_m(pg, op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsubr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsubr_s8_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsubr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsubr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv8i16")] + fn _svhsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsubr_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsubr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsubr_s16_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsubr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsubr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv4i32")] + fn _svhsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsubr_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsubr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsubr_s32_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsubr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsubr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv2i64")] + fn _svhsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsubr_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsubr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsubr_s64_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsubr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsubr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern 
"unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv16i8")] + fn _svhsubr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsubr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsubr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsubr_u8_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsubr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsubr_u8_m(pg, 
svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsubr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv8i16")] + fn _svhsubr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsubr_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsubr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u16_x(pg: svbool_t, op1: 
svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsubr_u16_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsubr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsubr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv4i32")] + fn _svhsubr_u32_m(pg: 
svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsubr_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsubr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsubr_u32_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsubr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Halving subtract reversed"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsubr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv2i64")] + fn _svhsubr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsubr_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsubr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsubr_u64_m(pg, op1, op2) 
+} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsubr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsubr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64index_f64( + pg: svbool_t, + base: *const f64, + indices: svint64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2f64" + )] + fn _svldnt1_gather_s64index_f64( + pg: svbool2_t, + base: *const f64, + indices: svint64_t, + ) -> svfloat64_t; + } + _svldnt1_gather_s64index_f64(pg.sve_into(), base, indices) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64index_s64( + pg: svbool_t, + base: *const i64, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i64" + )] + fn _svldnt1_gather_s64index_s64( + pg: svbool2_t, + base: *const i64, + indices: svint64_t, + ) -> svint64_t; + 
} + _svldnt1_gather_s64index_s64(pg.sve_into(), base, indices) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64index_u64( + pg: svbool_t, + base: *const u64, + indices: svint64_t, +) -> svuint64_t { + svldnt1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64index_f64( + pg: svbool_t, + base: *const f64, + indices: svuint64_t, +) -> svfloat64_t { + svldnt1_gather_s64index_f64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64index_s64( + pg: svbool_t, + base: *const i64, + indices: svuint64_t, +) -> svint64_t { + svldnt1_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64index_u64( + pg: svbool_t, + base: *const u64, + indices: svuint64_t, +) -> svuint64_t { + svldnt1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svint64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2f64" + )] + fn _svldnt1_gather_s64offset_f64( + pg: svbool2_t, + base: *const f64, + offsets: svint64_t, + ) -> svfloat64_t; + } + _svldnt1_gather_s64offset_f64(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i64" + )] + fn _svldnt1_gather_s64offset_s64( + pg: svbool2_t, + base: *const i64, + offsets: svint64_t, + ) -> svint64_t; + } + _svldnt1_gather_s64offset_s64(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svint64_t, +) -> svuint64_t { + svldnt1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svuint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4f32" + )] + fn _svldnt1_gather_u32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svldnt1_gather_u32offset_f32(pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met 
for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32" + )] + fn _svldnt1_gather_u32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svldnt1_gather_u32offset_s32(pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32offset_u32( + pg: 
svbool_t, + base: *const u32, + offsets: svuint32_t, +) -> svuint32_t { + svldnt1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svuint64_t, +) -> svfloat64_t { + svldnt1_gather_s64offset_f64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svuint64_t, +) -> svint64_t { + svldnt1_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a 
`usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t { + svldnt1_gather_u32base_offset_f32(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t { + svldnt1_gather_u64base_offset_f64(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element 
(governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn 
svldnt1_gather_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svfloat32_t { + svldnt1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldnt1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is 
similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldnt1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svfloat64_t { + svldnt1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3)) +} 
+#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, 
and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svldnt1_gather_u32base_offset_f32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> 
svfloat32_t; + } + _svldnt1_gather_u32base_offset_f32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svldnt1_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svint32_t; + } + _svldnt1_gather_u32base_offset_s32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed 
by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldnt1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] 
+pub unsafe fn svldnt1_gather_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svldnt1_gather_u64base_offset_f64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svfloat64_t; + } + _svldnt1_gather_u64base_offset_f64(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svldnt1_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svint64_t; + } + _svldnt1_gather_u64base_offset_s64(pg.sve_into(), 
bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_s64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i8" + )] + fn _svldnt1sb_gather_s64offset_s64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast(_svldnt1sb_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i16" + )] + fn _svldnt1sh_gather_s64offset_s64( + pg: svbool2_t, + 
base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svldnt1sh_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i32" + )] + fn _svldnt1sw_gather_s64offset_s64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svldnt1sw_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the 
calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_s64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svuint64_t { + svldnt1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] 
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name 
= "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8" + )] + fn _svldnt1sb_gather_u32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svldnt1sb_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16" + )] + fn _svldnt1sh_gather_u32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldnt1sh_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must 
be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svuint32_t { + svldnt1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svuint32_t { + svldnt1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svint64_t { + svldnt1sb_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64offset_s64( + pg: svbool_t, + 
base: *const i16, + offsets: svuint64_t, +) -> svint64_t { + svldnt1sh_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svint64_t { + svldnt1sw_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have 
special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldnt1sb_gather_u32base_offset_s32( + 
pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svldnt1sb_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldnt1sh_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldnt1sh_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldnt1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be 
required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldnt1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldnt1sb_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } 
+ crate::intrinsics::simd::simd_cast(_svldnt1sb_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldnt1sh_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svldnt1sh_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldnt1sw_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svldnt1sw_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or 
[`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 
32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special 
memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1sb_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1sh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1sb_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1sh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1sb_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * 
Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1sh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1sw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data 
and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1sb_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for 
some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1sh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1sw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for 
the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64index_s64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i16" + )] + fn _svldnt1sh_gather_s64index_s64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svldnt1sh_gather_s64index_s64(pg.sve_into(), base, indices)) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe 
fn svldnt1sw_gather_s64index_s64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i32" + )] + fn _svldnt1sw_gather_s64index_s64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svldnt1sw_gather_s64index_s64(pg.sve_into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64index_u64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64index_u64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64index_s64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svint64_t { + svldnt1sh_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64index_s64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svint64_t { + svldnt1sw_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64index_u64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 
32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64index_u64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldnt1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldnt1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_index_s64)"] +#[doc = 
"## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_index_u64)"] +#[doc = 
"## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub 
unsafe fn svldnt1ub_gather_s64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svint64_t { + svldnt1ub_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svint64_t { + svldnt1uh_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svint64_t { + svldnt1uw_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_s64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i8" + )] + fn _svldnt1ub_gather_s64offset_u64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1ub_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i16" + )] + fn _svldnt1uh_gather_s64offset_u64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1uh_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i32" + )] + fn _svldnt1uw_gather_s64offset_u64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1uw_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svint32_t { + svldnt1ub_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svint32_t { + svldnt1uh_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32offset_u32( + pg: svbool_t, + 
base: *const u8, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8" + )] + fn _svldnt1ub_gather_u32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1ub_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16" + )] + fn _svldnt1uh_gather_u32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1uh_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svint64_t { + svldnt1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64offset_s64( + pg: 
svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svint64_t { + svldnt1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svint64_t { + svldnt1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1ub_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1uh_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences 
and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1uw_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldnt1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 
16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldnt1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have 
special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldnt1ub_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1ub_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldnt1uh_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1uh_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldnt1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldnt1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be 
required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldnt1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldnt1ub_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + 
crate::intrinsics::simd::simd_cast::( + _svldnt1ub_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldnt1uh_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1uh_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] 
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldnt1uw_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1uw_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as 
ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1ub_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1uh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1ub_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1uh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1ub_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1uh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1uw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1ub_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` 
cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1uh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1uw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64index_s64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svint64_t { + svldnt1uh_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64index_s64( + pg: svbool_t, + base: 
*const u32, + indices: svint64_t, +) -> svint64_t { + svldnt1uw_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64index_u64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i16" + )] + fn _svldnt1uh_gather_s64index_u64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1uh_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active 
element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64index_u64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i32" + )] + fn _svldnt1uw_gather_s64index_u64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1uw_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64index_s64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svint64_t { + 
svldnt1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64index_s64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svint64_t { + svldnt1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64index_u64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svuint64_t { + svldnt1uh_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64index_u64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svuint64_t { + svldnt1uw_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or 
[`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldnt1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldnt1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit 
data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special 
memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Base 2 logarithm as integer"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f32_m(inactive: svint32_t, pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.flogb.nxv4f32")] + fn _svlogb_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svlogb_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f32_x(pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe { svlogb_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f32_z(pg: svbool_t, op: svfloat32_t) -> svint32_t { + svlogb_f32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Base 2 logarithm as integer"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f64_m(inactive: svint64_t, pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.flogb.nxv2f64")] + fn _svlogb_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svlogb_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f64_x(pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe { svlogb_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f64_z(pg: svbool_t, op: svfloat64_t) -> svint64_t { + svlogb_f64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Detect any matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.match.nxv16i8")] + fn _svmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svmatch_s8(pg, op1, op2) } +} +#[doc = "Detect any matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.match.nxv8i16")] + fn _svmatch_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svmatch_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Detect any matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(match))] 
+pub fn svmatch_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svmatch_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Detect any matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svmatch_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmp.nxv4f32" + )] + fn _svmaxnmp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmaxnmp_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxnmp_f32_m(pg, op1, op2) +} +#[doc = "Maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmp.nxv2f64" + )] + fn _svmaxnmp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmaxnmp_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxnmp_f64_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxp.nxv4f32")] + fn _svmaxp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmaxp_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxp_f32_m(pg, op1, op2) +} +#[doc = 
"Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxp.nxv2f64")] + fn _svmaxp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmaxp_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxp_f64_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv16i8")] + fn _svmaxp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmaxp_s8_m(pg, op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(smaxp))] +pub fn svmaxp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmaxp_s8_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv8i16")] + fn _svmaxp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmaxp_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmaxp_s16_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv4i32")] + fn _svmaxp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmaxp_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s32]_x)"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmaxp_s32_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv2i64")] + fn _svmaxp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmaxp_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmaxp_s64_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv16i8")] + fn _svmaxp_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmaxp_u8_m(pg, op1.as_signed(), 
op2.as_signed()).as_unsigned() } +}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn svmaxp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // The "don't care" (_x) form shares the merging (_m) implementation.
    svmaxp_u8_m(pg, op1, op2)
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn svmaxp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv8i16")]
        fn _svmaxp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: predicate converted to 16-bit lane width; bit patterns of the
    // unsigned wrappers are reinterpreted losslessly for the LLVM intrinsic.
    unsafe { _svmaxp_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn svmaxp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // The "don't care" (_x) form shares the merging (_m) implementation.
    svmaxp_u16_m(pg, op1, op2)
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn svmaxp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv4i32")]
        fn _svmaxp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: predicate converted to 32-bit lane width; lossless sign reinterpretation.
    unsafe { _svmaxp_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn svmaxp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // The "don't care" (_x) form shares the merging (_m) implementation.
    svmaxp_u32_m(pg, op1, op2)
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn svmaxp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv2i64")]
        fn _svmaxp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: predicate converted to 64-bit lane width; lossless sign reinterpretation.
    unsafe { _svmaxp_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn svmaxp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // The "don't care" (_x) form shares the merging (_m) implementation.
    svmaxp_u64_m(pg, op1, op2)
}
#[doc = "Minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn svminnmp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminnmp.nxv4f32")]
        fn _svminnmp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: predicate converted to 32-bit lane width; float operands pass through.
    unsafe { _svminnmp_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn svminnmp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // The "don't care" (_x) form shares the merging (_m) implementation.
    svminnmp_f32_m(pg, op1, op2)
}
#[doc = "Minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn svminnmp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminnmp.nxv2f64")]
        fn _svminnmp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: predicate converted to 64-bit lane width; float operands pass through.
    unsafe { _svminnmp_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn svminnmp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // The "don't care" (_x) form shares the merging (_m) implementation.
    svminnmp_f64_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn svminp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminp.nxv4f32")]
        fn _svminp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: predicate converted to 32-bit lane width; float operands pass through.
    unsafe { _svminp_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn svminp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // The "don't care" (_x) form shares the merging (_m) implementation.
    svminp_f32_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn svminp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminp.nxv2f64")]
        fn _svminp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: predicate converted to 64-bit lane width; float operands pass through.
    unsafe { _svminp_f64_m(pg.sve_into(), op1, op2) }
}
+#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f64]_x)"]
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn svminp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminp_f64_m(pg, op1, op2) +}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn svminp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv16i8")]
        fn _svminp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: 8-bit lanes use the full-width predicate directly; no conversion needed.
    unsafe { _svminp_s8_m(pg, op1, op2) }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn svminp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // The "don't care" (_x) form shares the merging (_m) implementation.
    svminp_s8_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn svminp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv8i16")]
        fn _svminp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: predicate converted to 16-bit lane width.
    unsafe { _svminp_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn svminp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // The "don't care" (_x) form shares the merging (_m) implementation.
    svminp_s16_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn svminp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv4i32")]
        fn _svminp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: predicate converted to 32-bit lane width.
    unsafe { _svminp_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn svminp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // The "don't care" (_x) form shares the merging (_m) implementation.
    svminp_s32_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn svminp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv2i64")]
        fn _svminp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: predicate converted to 64-bit lane width.
    unsafe { _svminp_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn svminp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // The "don't care" (_x) form shares the merging (_m) implementation.
    svminp_s64_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn svminp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv16i8")]
        fn _svminp_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: 8-bit lanes use the full-width predicate; lossless sign reinterpretation.
    unsafe { _svminp_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn svminp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // The "don't care" (_x) form shares the merging (_m) implementation.
    svminp_u8_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn svminp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv8i16")]
        fn _svminp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: predicate converted to 16-bit lane width; lossless sign reinterpretation.
    unsafe { _svminp_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn svminp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // The "don't care" (_x) form shares the merging (_m) implementation.
    svminp_u16_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn svminp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv4i32")]
        fn _svminp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: predicate converted to 32-bit lane width; lossless sign reinterpretation.
    unsafe { _svminp_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn svminp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // The "don't care" (_x) form shares the merging (_m) implementation.
    svminp_u32_m(pg, op1, op2)
}
+#[doc = "Minimum pairwise"] +#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv2i64")] + fn _svminp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svminp_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn svminp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // The "don't care" (_x) form shares the merging (_m) implementation.
    svminp_u64_m(pg, op1, op2)
}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
pub fn svmla_lane_s16<const IMM_INDEX: i32>(
    op1: svint16_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint16_t {
    // Lane index must address one of the 8 halfword lanes of a 128-bit segment.
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.lane.nxv8i16")]
        fn _svmla_lane_s16(
            op1: svint16_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint16_t;
    }
    // SAFETY: IMM_INDEX is range-checked at compile time above.
    unsafe { _svmla_lane_s16(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
pub fn svmla_lane_s32<const IMM_INDEX: i32>(
    op1: svint32_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint32_t {
    // Lane index must address one of the 4 word lanes of a 128-bit segment.
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.lane.nxv4i32")]
        fn _svmla_lane_s32(
            op1: svint32_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    // SAFETY: IMM_INDEX is range-checked at compile time above.
    unsafe { _svmla_lane_s32(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
pub fn svmla_lane_s64<const IMM_INDEX: i32>(
    op1: svint64_t,
    op2: svint64_t,
    op3: svint64_t,
) -> svint64_t {
    // Lane index must address one of the 2 doubleword lanes of a 128-bit segment.
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.lane.nxv2i64")]
        fn _svmla_lane_s64(
            op1: svint64_t,
            op2: svint64_t,
            op3: svint64_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    // SAFETY: IMM_INDEX is range-checked at compile time above.
    unsafe { _svmla_lane_s64(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
pub fn svmla_lane_u16<const IMM_INDEX: i32>(
    op1: svuint16_t,
    op2: svuint16_t,
    op3: svuint16_t,
) -> svuint16_t {
    static_assert_range!(IMM_INDEX, 0..=7);
    // SAFETY: lossless sign reinterpretation; same MLA operation on the bit level.
    unsafe {
        svmla_lane_s16::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
    }
}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
pub fn svmla_lane_u32<const IMM_INDEX: i32>(
    op1: svuint32_t,
    op2: svuint32_t,
    op3: svuint32_t,
) -> svuint32_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    // SAFETY: lossless sign reinterpretation; same MLA operation on the bit level.
    unsafe {
        svmla_lane_s32::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
    }
}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
pub fn svmla_lane_u64<const IMM_INDEX: i32>(
    op1: svuint64_t,
    op2: svuint64_t,
    op3: svuint64_t,
) -> svuint64_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    // SAFETY: lossless sign reinterpretation; same MLA operation on the bit level.
    unsafe {
        svmla_lane_s64::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
    }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalb, IMM_INDEX = 0))]
pub fn svmlalb_lane_s32<const IMM_INDEX: i32>(
    op1: svint32_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint32_t {
    // Narrow (16-bit) multiplicand lane index: 8 lanes per 128-bit segment.
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.lane.nxv4i32")]
        fn _svmlalb_lane_s32(
            op1: svint32_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    // SAFETY: IMM_INDEX is range-checked at compile time above.
    unsafe { _svmlalb_lane_s32(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalb, IMM_INDEX = 0))]
pub fn svmlalb_lane_s64<const IMM_INDEX: i32>(
    op1: svint64_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint64_t {
    // Narrow (32-bit) multiplicand lane index: 4 lanes per 128-bit segment.
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.lane.nxv2i64")]
        fn _svmlalb_lane_s64(
            op1: svint64_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    // SAFETY: IMM_INDEX is range-checked at compile time above.
    unsafe { _svmlalb_lane_s64(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalb, IMM_INDEX = 0))]
pub fn svmlalb_lane_u32<const IMM_INDEX: i32>(
    op1: svuint32_t,
    op2: svuint16_t,
    op3: svuint16_t,
) -> svuint32_t {
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.lane.nxv4i32")]
        fn _svmlalb_lane_u32(
            op1: svint32_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    // SAFETY: IMM_INDEX is range-checked; lossless sign reinterpretation.
    unsafe {
        _svmlalb_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
            .as_unsigned()
    }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalb, IMM_INDEX = 0))]
pub fn svmlalb_lane_u64<const IMM_INDEX: i32>(
    op1: svuint64_t,
    op2: svuint32_t,
    op3: svuint32_t,
) -> svuint64_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.lane.nxv2i64")]
        fn _svmlalb_lane_u64(
            op1: svint64_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    // SAFETY: IMM_INDEX is range-checked; lossless sign reinterpretation.
    unsafe {
        _svmlalb_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
            .as_unsigned()
    }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalb))]
pub fn svmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv8i16")]
        fn _svmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
    }
    // SAFETY: wrapper types match the LLVM intrinsic signature exactly.
    unsafe { _svmlalb_s16(op1, op2, op3) }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalb))]
pub fn svmlalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
    // Scalar form: broadcast op3 to every lane, then reuse the vector form.
    svmlalb_s16(op1, op2, svdup_n_s8(op3))
}
+#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_s32(op1: svint32_t, op2:
svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv4i32")] + fn _svmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlalb_s32(op1, op2, op3) } +}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalb))]
pub fn svmlalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
    // Scalar form: broadcast op3 to every lane, then reuse the vector form.
    svmlalb_s32(op1, op2, svdup_n_s16(op3))
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalb))]
pub fn svmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv2i64")]
        fn _svmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
    }
    // SAFETY: wrapper types match the LLVM intrinsic signature exactly.
    unsafe { _svmlalb_s64(op1, op2, op3) }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalb))]
pub fn svmlalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
    // Scalar form: broadcast op3 to every lane, then reuse the vector form.
    svmlalb_s64(op1, op2, svdup_n_s32(op3))
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalb))]
pub fn svmlalb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv8i16")]
        fn _svmlalb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
    }
    // SAFETY: lossless sign reinterpretation around the unsigned LLVM intrinsic.
    unsafe { _svmlalb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalb))]
pub fn svmlalb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t {
    // Scalar form: broadcast op3 to every lane, then reuse the vector form.
    svmlalb_u16(op1, op2, svdup_n_u8(op3))
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalb))]
pub fn svmlalb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv4i32")]
        fn _svmlalb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
    }
    // SAFETY: lossless sign reinterpretation around the unsigned LLVM intrinsic.
    unsafe { _svmlalb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
+#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test,
assert_instr(umlalb))] +pub fn svmlalb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlalb_u32(op1, op2, svdup_n_u16(op3)) +}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalb))]
pub fn svmlalb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv2i64")]
        fn _svmlalb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
    }
    // SAFETY: lossless sign reinterpretation around the unsigned LLVM intrinsic.
    unsafe { _svmlalb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalb))]
pub fn svmlalb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t {
    // Scalar form: broadcast op3 to every lane, then reuse the vector form.
    svmlalb_u64(op1, op2, svdup_n_u32(op3))
}
#[doc = "Multiply-add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalt, IMM_INDEX = 0))]
pub fn svmlalt_lane_s32<const IMM_INDEX: i32>(
    op1: svint32_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint32_t {
    // Narrow (16-bit) multiplicand lane index: 8 lanes per 128-bit segment.
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.lane.nxv4i32")]
        fn _svmlalt_lane_s32(
            op1: svint32_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    // SAFETY: IMM_INDEX is range-checked at compile time above.
    unsafe { _svmlalt_lane_s32(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalt, IMM_INDEX = 0))]
pub fn svmlalt_lane_s64<const IMM_INDEX: i32>(
    op1: svint64_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint64_t {
    // Narrow (32-bit) multiplicand lane index: 4 lanes per 128-bit segment.
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.lane.nxv2i64")]
        fn _svmlalt_lane_s64(
            op1: svint64_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    // SAFETY: IMM_INDEX is range-checked at compile time above.
    unsafe { _svmlalt_lane_s64(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalt, IMM_INDEX = 0))]
pub fn svmlalt_lane_u32<const IMM_INDEX: i32>(
    op1: svuint32_t,
    op2: svuint16_t,
    op3: svuint16_t,
) -> svuint32_t {
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.lane.nxv4i32")]
        fn _svmlalt_lane_u32(
            op1: svint32_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    // SAFETY: IMM_INDEX is range-checked; lossless sign reinterpretation.
    unsafe {
        _svmlalt_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
            .as_unsigned()
    }
}
#[doc = "Multiply-add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalt, IMM_INDEX = 0))]
pub fn svmlalt_lane_u64<const IMM_INDEX: i32>(
    op1: svuint64_t,
    op2: svuint32_t,
    op3: svuint32_t,
) -> svuint64_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.lane.nxv2i64")]
        fn _svmlalt_lane_u64(
            op1: svint64_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    // SAFETY: IMM_INDEX is range-checked; lossless sign reinterpretation.
    unsafe {
        _svmlalt_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
            .as_unsigned()
    }
}
#[doc = "Multiply-add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalt))]
pub fn svmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv8i16")]
        fn _svmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
    }
    // SAFETY: wrapper types match the LLVM intrinsic signature exactly.
    unsafe { _svmlalt_s16(op1, op2, op3) }
}
#[doc = "Multiply-add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalt))]
pub fn svmlalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
    // Scalar form: broadcast op3 to every lane, then reuse the vector form.
    svmlalt_s16(op1, op2, svdup_n_s8(op3))
}
+#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_s32(op1: svint32_t,
op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv4i32")] + fn _svmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlalt_s32(op1, op2, op3) } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlalt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv2i64")] + fn _svmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlalt_s64(op1, op2, op3) } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlalt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u16])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv8i16")] + fn _svmlalt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlalt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlalt_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv4i32")] + fn _svmlalt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlalt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn 
svmlalt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlalt_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv2i64")] + fn _svmlalt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlalt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svmlalt_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mls.lane.nxv8i16" + )] + fn _svmls_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint16_t; + } + unsafe { 
_svmls_lane_s16(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mls.lane.nxv4i32" + )] + fn _svmls_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmls_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mls.lane.nxv2i64" + )] + fn _svmls_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmls_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_u16( + op1: svuint16_t, + op2: 
svuint16_t, + op3: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe { + svmls_lane_s16::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_u32( + op1: svuint32_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { + svmls_lane_s32::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_u64( + op1: svuint64_t, + op2: svuint64_t, + op3: svuint64_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { + svmls_lane_s64::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslb.lane.nxv4i32" + )] + fn 
_svmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmlslb_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslb.lane.nxv2i64" + )] + fn _svmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmlslb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_u32( + op1: svuint32_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslb.lane.nxv4i32" + )] + fn _svmlslb_lane_u32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { + _svmlslb_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_u64])"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_u64( + op1: svuint64_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslb.lane.nxv2i64" + )] + fn _svmlslb_lane_u64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { + _svmlslb_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv8i16")] + fn _svmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslb_s16(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svmlslb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s32])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv4i32")] + fn _svmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslb_s32(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlslb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv2i64")] + fn _svmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslb_s64(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlslb_s64(op1, op2, svdup_n_s32(op3)) 
+} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv8i16")] + fn _svmlslb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlslb_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv4i32")] + fn _svmlslb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlslb_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv2i64")] + fn _svmlslb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svmlslb_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt, IMM_INDEX = 0))] +pub fn svmlslt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + 
static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslt.lane.nxv4i32" + )] + fn _svmlslt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmlslt_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt, IMM_INDEX = 0))] +pub fn svmlslt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslt.lane.nxv2i64" + )] + fn _svmlslt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmlslt_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt, IMM_INDEX = 0))] +pub fn svmlslt_lane_u32( + op1: svuint32_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslt.lane.nxv4i32" + )] + fn _svmlslt_lane_u32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { + _svmlslt_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = 
"Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt, IMM_INDEX = 0))] +pub fn svmlslt_lane_u64( + op1: svuint64_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslt.lane.nxv2i64" + )] + fn _svmlslt_lane_u64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { + _svmlslt_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv8i16")] + fn _svmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslt_s16(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svmlslt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv4i32")] + fn _svmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslt_s32(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlslt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv2i64")] + fn _svmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslt_s64(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(smlslt))] +pub fn svmlslt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlslt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv8i16")] + fn _svmlslt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlslt_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv4i32")] + fn _svmlslt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} 
+#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlslt_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv2i64")] + fn _svmlslt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svmlslt_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllb))] +pub fn svmovlb_s16(op: svint8_t) -> svint16_t { + svshllb_n_s16::<0>(op) +} 
+#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllb))] +pub fn svmovlb_s32(op: svint16_t) -> svint32_t { + svshllb_n_s32::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllb))] +pub fn svmovlb_s64(op: svint32_t) -> svint64_t { + svshllb_n_s64::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllb))] +pub fn svmovlb_u16(op: svuint8_t) -> svuint16_t { + svshllb_n_u16::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllb))] +pub fn svmovlb_u32(op: svuint16_t) -> svuint32_t { + svshllb_n_u32::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllb))] +pub fn svmovlb_u64(op: svuint32_t) -> svuint64_t { + svshllb_n_u64::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllt))] +pub fn svmovlt_s16(op: svint8_t) -> svint16_t { + svshllt_n_s16::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllt))] +pub fn svmovlt_s32(op: svint16_t) -> svint32_t { + svshllt_n_s32::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllt))] +pub fn svmovlt_s64(op: svint32_t) -> svint64_t { + svshllt_n_s64::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllt))] +pub fn svmovlt_u16(op: svuint8_t) -> svuint16_t { + svshllt_n_u16::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllt))] +pub fn svmovlt_u32(op: svuint16_t) -> svuint32_t { + svshllt_n_u32::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllt))] +pub fn svmovlt_u64(op: svuint32_t) -> svuint64_t { + svshllt_n_u64::<0>(op) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul, IMM_INDEX = 0))] +pub fn svmul_lane_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmul.lane.nxv4f32" + )] + fn _svmul_lane_f32(op1: svfloat32_t, op2: svfloat32_t, imm_index: i32) -> svfloat32_t; + } + unsafe { _svmul_lane_f32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul, IMM_INDEX = 0))] +pub fn svmul_lane_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmul.lane.nxv2f64" + )] + fn _svmul_lane_f64(op1: svfloat64_t, op2: svfloat64_t, imm_index: i32) -> svfloat64_t; + } + unsafe { _svmul_lane_f64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mul.lane.nxv8i16" + )] + fn _svmul_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t; + } + unsafe { _svmul_lane_s16(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mul.lane.nxv4i32" + )] + fn _svmul_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmul_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mul.lane.nxv2i64" + )] + fn _svmul_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmul_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe { svmul_lane_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { svmul_lane_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { svmul_lane_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb, IMM_INDEX = 0))] +pub fn svmullb_lane_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullb.lane.nxv4i32" + )] + fn _svmullb_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullb_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb, IMM_INDEX = 0))] +pub fn svmullb_lane_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullb.lane.nxv2i64" + )] + fn _svmullb_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullb_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb, IMM_INDEX = 0))] +pub fn svmullb_lane_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullb.lane.nxv4i32" + )] + fn _svmullb_lane_u32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullb_lane_u32(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(umullb, IMM_INDEX = 0))] +pub fn svmullb_lane_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullb.lane.nxv2i64" + )] + fn _svmullb_lane_u64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullb_lane_u64(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv8i16")] + fn _svmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullb_s16(op1, op2) } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svmullb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.smullb.nxv4i32")] + fn _svmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullb_s32(op1, op2) } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svmullb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv2i64")] + fn _svmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullb_s64(op1, op2) } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svmullb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern 
"unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv8i16")] + fn _svmullb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svmullb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv4i32")] + fn _svmullb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svmullb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv2i64")] + fn _svmullb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svmullb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt, IMM_INDEX = 0))] +pub fn svmullt_lane_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullt.lane.nxv4i32" + )] + fn _svmullt_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullt_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt, IMM_INDEX = 0))] +pub fn svmullt_lane_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern 
"unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullt.lane.nxv2i64" + )] + fn _svmullt_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullt_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt, IMM_INDEX = 0))] +pub fn svmullt_lane_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullt.lane.nxv4i32" + )] + fn _svmullt_lane_u32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullt_lane_u32(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt, IMM_INDEX = 0))] +pub fn svmullt_lane_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullt.lane.nxv2i64" + )] + fn _svmullt_lane_u64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullt_lane_u64(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv8i16")] + fn _svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullt_s16(op1, op2) } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svmullt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv4i32")] + fn _svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullt_s32(op1, op2) } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svmullt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s64])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv2i64")] + fn _svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullt_s64(op1, op2) } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svmullt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv8i16")] + fn _svmullt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svmullt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv4i32")] + fn _svmullt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svmullt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv2i64")] + fn _svmullt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn 
svmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svmullt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv16i8")] + fn _svnbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svnbsl_s8(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svnbsl_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv8i16")] + fn _svnbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svnbsl_s16(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svnbsl_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv4i32")] + fn _svnbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svnbsl_s32(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svnbsl_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv2i64")] + fn _svnbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svnbsl_s64(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svnbsl_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svnbsl_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svnbsl_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svnbsl_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svnbsl_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svnbsl_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svnbsl_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svnbsl_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + 
svnbsl_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Detect no matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nmatch.nxv16i8")] + fn _svnmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svnmatch_s8(pg, op1, op2) } +} +#[doc = "Detect no matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nmatch.nxv8i16")] + fn _svnmatch_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svnmatch_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Detect no matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svnmatch_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Detect no matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_u16])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svnmatch_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Polynomial multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmul[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmul))] +pub fn svpmul_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pmul.nxv16i8")] + fn _svpmul_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svpmul_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmul[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmul))] +pub fn svpmul_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svpmul_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullb.pair.nxv16i8" + )] + fn _svpmullb_pair_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svpmullb_pair_u8(op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svpmullb_pair_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullb.pair.nxv4i32" + )] + fn _svpmullb_pair_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svpmullb_pair_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svpmullb_pair_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] 
+pub fn svpmullb_pair_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullb.pair.nxv2i64" + )] + fn _svpmullb_pair_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svpmullb_pair_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svpmullb_pair_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(svpmullb_pair_u8(op1, op2)) } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svpmullb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(svpmullb_pair_u32(op1, op2)) } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svpmullb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullt.pair.nxv16i8" + )] + fn _svpmullt_pair_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svpmullt_pair_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svpmullt_pair_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullt.pair.nxv4i32" + )] + fn _svpmullt_pair_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svpmullt_pair_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svpmullt_pair_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullt.pair.nxv2i64" + )] + fn _svpmullt_pair_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svpmullt_pair_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u64])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svpmullt_pair_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(svpmullt_pair_u8(op1, op2)) } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svpmullt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(svpmullt_pair_u32(op1, op2)) } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(pmullt))] +pub fn svpmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svpmullt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv16i8")] + fn _svqabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svqabs_s8_m(inactive, pg, op) } +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svqabs_s8_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svqabs_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> 
svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv8i16")] + fn _svqabs_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svqabs_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svqabs_s16_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svqabs_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv4i32")] + fn _svqabs_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svqabs_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svqabs_s32_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svqabs_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv2i64")] + fn _svqabs_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svqabs_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svqabs_s64_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svqabs_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv16i8")] + fn _svqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_s8_m(pg, op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqadd_s8_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + 
svqadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv8i16")] + fn _svqadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqadd_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = 
"Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqadd_s16_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv4i32")] + fn _svqadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqadd_s32_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32_z(pg: 
svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv2i64")] + fn _svqadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64_x(pg: svbool_t, op1: svint64_t, op2: 
svint64_t) -> svint64_t { + svqadd_s64_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv16i8")] + fn _svqadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_u8_m(pg, 
op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqadd_u8_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv8i16")] + fn _svqadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqadd_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqadd_u16_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv4i32")] + fn _svqadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqadd_u32_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = 
"Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv2i64")] + fn _svqadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqadd_u64_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating add"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv16i8" + )] + fn _svqcadd_s8(op1: svint8_t, op2: svint8_t, imm_rotation: i32) -> svint8_t; + } + unsafe { _svqcadd_s8(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s16(op1: svint16_t, op2: svint16_t) -> 
svint16_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv8i16" + )] + fn _svqcadd_s16(op1: svint16_t, op2: svint16_t, imm_rotation: i32) -> svint16_t; + } + unsafe { _svqcadd_s16(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv4i32" + )] + fn _svqcadd_s32(op1: svint32_t, op2: svint32_t, imm_rotation: i32) -> svint32_t; + } + unsafe { _svqcadd_s32(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv2i64" + )] + fn _svqcadd_s64(op1: svint64_t, op2: svint64_t, imm_rotation: i32) -> svint64_t; + } + unsafe { _svqcadd_s64(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb, IMM_INDEX = 0))] +pub fn svqdmlalb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.lane.nxv4i32" + )] + fn _svqdmlalb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlalb_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb, IMM_INDEX = 0))] +pub fn svqdmlalb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.lane.nxv2i64" + )] + fn _svqdmlalb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlalb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe 
extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.nxv8i16" + )] + fn _svqdmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlalb_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlalb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.nxv4i32" + )] + fn _svqdmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlalb_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlalb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.nxv2i64" + )] + fn _svqdmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlalb_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlalb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalbt.nxv8i16" + )] + fn _svqdmlalbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlalbt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s16])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlalbt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalbt.nxv4i32" + )] + fn _svqdmlalbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlalbt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlalbt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.sqdmlalbt.nxv2i64" + )] + fn _svqdmlalbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlalbt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlalbt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt, IMM_INDEX = 0))] +pub fn svqdmlalt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.lane.nxv4i32" + )] + fn _svqdmlalt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlalt_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt, IMM_INDEX = 0))] +pub fn svqdmlalt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern 
"unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.lane.nxv2i64" + )] + fn _svqdmlalt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlalt_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.nxv8i16" + )] + fn _svqdmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlalt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlalt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.nxv4i32" + )] 
+ fn _svqdmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlalt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlalt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.nxv2i64" + )] + fn _svqdmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlalt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlalt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb, IMM_INDEX = 0))] +pub fn svqdmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.lane.nxv4i32" + )] + fn _svqdmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlslb_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb, IMM_INDEX = 0))] +pub fn svqdmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.lane.nxv2i64" + )] + fn _svqdmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlslb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.nxv8i16" + )] + fn _svqdmlslb_s16(op1: 
svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlslb_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlslb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.nxv4i32" + )] + fn _svqdmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlslb_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlslb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.nxv2i64" + )] + fn _svqdmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlslb_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlslb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslbt.nxv8i16" + )] + fn _svqdmlslbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlslbt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn 
svqdmlslbt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlslbt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslbt.nxv4i32" + )] + fn _svqdmlslbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlslbt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlslbt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslbt.nxv2i64" + )] + fn _svqdmlslbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { 
_svqdmlslbt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlslbt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslt, IMM_INDEX = 0))] +pub fn svqdmlslt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.lane.nxv4i32" + )] + fn _svqdmlslt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlslt_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslt, IMM_INDEX = 0))] +pub fn svqdmlslt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.lane.nxv2i64" + )] + fn 
_svqdmlslt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlslt_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.nxv8i16" + )] + fn _svqdmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlslt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlslt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.nxv4i32" + )] + fn _svqdmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { 
_svqdmlslt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlslt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.nxv2i64" + )] + fn _svqdmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlslt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlslt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(sqdmulh, IMM_INDEX = 0))] +pub fn svqdmulh_lane_s16<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv8i16" + )] + fn _svqdmulh_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t; + } + unsafe { _svqdmulh_lane_s16(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))] +pub fn svqdmulh_lane_s32<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv4i32" + )] + fn _svqdmulh_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t; + } + unsafe { _svqdmulh_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))] +pub fn svqdmulh_lane_s64<const IMM_INDEX: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv2i64" + )] + fn _svqdmulh_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t; + } + unsafe { _svqdmulh_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.nxv16i8" + )] + fn _svqdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqdmulh_s8(op1, op2) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqdmulh_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.nxv8i16" + )] + fn _svqdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqdmulh_s16(op1, op2) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_n_s16(op1: svint16_t, op2: i16) 
-> svint16_t { + svqdmulh_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.nxv4i32" + )] + fn _svqdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqdmulh_s32(op1, op2) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqdmulh_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.nxv2i64" + )] + fn _svqdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqdmulh_s64(op1, op2) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqdmulh_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullb, IMM_INDEX = 0))] +pub fn svqdmullb_lane_s32<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.lane.nxv4i32" + )] + fn _svqdmullb_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svqdmullb_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullb, IMM_INDEX = 0))] +pub fn svqdmullb_lane_s64<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.lane.nxv2i64" + )] + fn _svqdmullb_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svqdmullb_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.nxv8i16" + )] + fn _svqdmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svqdmullb_s16(op1, op2) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svqdmullb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.nxv4i32" + )] + fn _svqdmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svqdmullb_s32(op1, op2) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svqdmullb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.nxv2i64" + )] + fn _svqdmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svqdmullb_s64(op1, op2) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svqdmullb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullt, IMM_INDEX = 0))] +pub fn svqdmullt_lane_s32<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.lane.nxv4i32" + )] + fn _svqdmullt_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svqdmullt_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt_lane[_s64])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullt, IMM_INDEX = 0))] +pub fn svqdmullt_lane_s64<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.lane.nxv2i64" + )] + fn _svqdmullt_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svqdmullt_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.nxv8i16" + )] + fn _svqdmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svqdmullt_s16(op1, op2) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svqdmullt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn 
svqdmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.nxv4i32" + )] + fn _svqdmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svqdmullt_s32(op1, op2) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svqdmullt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.nxv2i64" + )] + fn _svqdmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svqdmullt_s64(op1, op2) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svqdmullt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv16i8")] + fn _svqneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svqneg_s8_m(inactive, pg, op) } +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svqneg_s8_m(op, pg, op) +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svqneg_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv8i16")] + fn _svqneg_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svqneg_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Saturating negate"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svqneg_s16_m(op, pg, op) +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svqneg_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv4i32")] + fn _svqneg_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svqneg_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svqneg_s32_m(op, pg, op) +} +#[doc = "Saturating negate"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svqneg_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv2i64")] + fn _svqneg_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svqneg_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svqneg_s64_m(op, pg, op) +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svqneg_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Saturating rounding doubling complex multiply-add high with rotate"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svqrdcmlah_lane_s16<const IMM_INDEX: i32, const IMM_ROTATION: i32>( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=3); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.lane.x.nxv8i16" + )] + fn _svqrdcmlah_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint16_t; + } + unsafe { _svqrdcmlah_lane_s16(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling complex multiply-add high with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svqrdcmlah_lane_s32<const IMM_INDEX: i32, const IMM_ROTATION: i32>( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.lane.x.nxv4i32" + )] + fn _svqrdcmlah_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svqrdcmlah_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling complex multiply-add high with 
rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))] +pub fn svqrdcmlah_s8<const IMM_ROTATION: i32>( + op1: svint8_t, + op2: svint8_t, + op3: svint8_t, +) -> svint8_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv16i8" + )] + fn _svqrdcmlah_s8( + op1: svint8_t, + op2: svint8_t, + op3: svint8_t, + imm_rotation: i32, + ) -> svint8_t; + } + unsafe { _svqrdcmlah_s8(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling complex multiply-add high with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))] +pub fn svqrdcmlah_s16<const IMM_ROTATION: i32>( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv8i16" + )] + fn _svqrdcmlah_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + imm_rotation: i32, + ) -> svint16_t; + } + unsafe { _svqrdcmlah_s16(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling complex multiply-add high with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))] +pub fn svqrdcmlah_s32<const IMM_ROTATION: i32>( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv4i32" + )] + fn _svqrdcmlah_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svqrdcmlah_s32(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling complex multiply-add high with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))] +pub fn svqrdcmlah_s64<const IMM_ROTATION: i32>( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv2i64" + )] + fn _svqrdcmlah_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + imm_rotation: i32, + ) -> svint64_t; + } + unsafe { _svqrdcmlah_s64(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))] +pub fn svqrdmlah_lane_s16<const IMM_INDEX: i32>( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + 
static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv8i16" + )] + fn _svqrdmlah_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint16_t; + } + unsafe { _svqrdmlah_lane_s16(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))] +pub fn svqrdmlah_lane_s32<const IMM_INDEX: i32>( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv4i32" + )] + fn _svqrdmlah_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqrdmlah_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))] +pub fn svqrdmlah_lane_s64<const IMM_INDEX: i32>( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv2i64" + )] + fn _svqrdmlah_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqrdmlah_lane_s64(op1, op2, op3, IMM_INDEX) } +} 
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrdmlah.nxv16i8")]
        fn _svqrdmlah_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    unsafe { _svqrdmlah_s8(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    // Scalar form: splat op3 and reuse the vector intrinsic.
    svqrdmlah_s8(op1, op2, svdup_n_s8(op3))
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrdmlah.nxv8i16")]
        fn _svqrdmlah_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    unsafe { _svqrdmlah_s16(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    svqrdmlah_s16(op1, op2, svdup_n_s16(op3))
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrdmlah.nxv4i32")]
        fn _svqrdmlah_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    unsafe { _svqrdmlah_s32(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    svqrdmlah_s32(op1, op2, svdup_n_s32(op3))
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    unsafe
extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlah.nxv2i64"
        )]
        fn _svqrdmlah_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    unsafe { _svqrdmlah_s64(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    // Scalar form: splat op3 and reuse the vector intrinsic.
    svqrdmlah_s64(op1, op2, svdup_n_s64(op3))
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))]
pub fn svqrdmlsh_lane_s16<const IMM_INDEX: i32>(
    op1: svint16_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint16_t {
    // A 128-bit indexed tile of .H elements has 8 lanes.
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv8i16"
        )]
        fn _svqrdmlsh_lane_s16(
            op1: svint16_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint16_t;
    }
    unsafe { _svqrdmlsh_lane_s16(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))]
pub fn svqrdmlsh_lane_s32<const IMM_INDEX: i32>(
    op1: svint32_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint32_t {
    // A 128-bit indexed tile of .S elements has 4 lanes.
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv4i32"
        )]
        fn _svqrdmlsh_lane_s32(
            op1: svint32_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    unsafe { _svqrdmlsh_lane_s32(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))]
pub fn svqrdmlsh_lane_s64<const IMM_INDEX: i32>(
    op1: svint64_t,
    op2: svint64_t,
    op3: svint64_t,
) -> svint64_t {
    // A 128-bit indexed tile of .D elements has 2 lanes.
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv2i64"
        )]
        fn _svqrdmlsh_lane_s64(
            op1: svint64_t,
            op2: svint64_t,
            op3: svint64_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    unsafe { _svqrdmlsh_lane_s64(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlsh.nxv16i8"
        )]
        fn _svqrdmlsh_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    unsafe { _svqrdmlsh_s8(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    // Scalar form: splat op3 and reuse the vector intrinsic.
    svqrdmlsh_s8(op1, op2, svdup_n_s8(op3))
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrdmlsh.nxv8i16")]
        fn _svqrdmlsh_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    unsafe { _svqrdmlsh_s16(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    svqrdmlsh_s16(op1, op2, svdup_n_s16(op3))
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrdmlsh.nxv4i32")]
        fn _svqrdmlsh_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    unsafe { _svqrdmlsh_s32(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    svqrdmlsh_s32(op1, op2, svdup_n_s32(op3))
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrdmlsh.nxv2i64")]
        fn _svqrdmlsh_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    unsafe { _svqrdmlsh_s64(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    svqrdmlsh_s64(op1, op2, svdup_n_s64(op3))
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))]
pub fn svqrdmulh_lane_s16<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // A 128-bit indexed tile of .H elements has 8 lanes.
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv8i16"
        )]
        fn _svqrdmulh_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t;
    }
    unsafe { _svqrdmulh_lane_s16(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))]
pub fn svqrdmulh_lane_s32<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // A 128-bit indexed tile of .S elements has 4 lanes.
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv4i32"
        )]
        fn _svqrdmulh_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t;
    }
    unsafe { _svqrdmulh_lane_s32(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))]
pub fn svqrdmulh_lane_s64<const IMM_INDEX: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // A 128-bit indexed tile of .D elements has 2 lanes.
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv2i64"
        )]
        fn _svqrdmulh_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t;
    }
    unsafe { _svqrdmulh_lane_s64(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.nxv16i8"
        )]
        fn _svqrdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _svqrdmulh_s8(op1, op2) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_n_s8(op1: svint8_t, op2: i8) -> svint8_t {
    // Scalar form: splat op2 and reuse the vector intrinsic.
    svqrdmulh_s8(op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.nxv8i16"
        )]
        fn _svqrdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svqrdmulh_s16(op1, op2) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_n_s16(op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar form: splat op2 and reuse the vector intrinsic.
    svqrdmulh_s16(op1, svdup_n_s16(op2))
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrdmulh.nxv4i32")]
        fn _svqrdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svqrdmulh_s32(op1, op2) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_n_s32(op1: svint32_t, op2: i32) -> svint32_t {
    svqrdmulh_s32(op1, svdup_n_s32(op2))
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrdmulh.nxv2i64")]
        fn _svqrdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svqrdmulh_s64(op1, op2) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_n_s64(op1: svint64_t, op2: i64) -> svint64_t {
    svqrdmulh_s64(op1, svdup_n_s64(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv16i8")]
        fn _svqrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _svqrshl_s8_m(pg, op1, op2) }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svqrshl_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // "Don't care" form delegates to the merging form.
    svqrshl_s8_m(pg, op1, op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svqrshl_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Zeroing form: select zero for inactive lanes before merging.
    svqrshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svqrshl_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv8i16")]
        fn _svqrshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svqrshl_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svqrshl_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    svqrshl_s16_m(pg, op1, op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svqrshl_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    svqrshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svqrshl_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv4i32")]
        fn _svqrshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svqrshl_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svqrshl_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // "Don't care" form delegates to the merging form.
    svqrshl_s32_m(pg, op1, op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svqrshl_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Zeroing form: select zero for inactive lanes before merging.
    svqrshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svqrshl_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv2i64")]
        fn _svqrshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svqrshl_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svqrshl_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svqrshl_s64_m(pg, op1, op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svqrshl_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svqrshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svqrshl_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv16i8")]
        fn _svqrshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // The LLVM intrinsic is declared on signed vectors; reinterpret in and out.
    unsafe { _svqrshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    svqrshl_u8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    svqrshl_u8_m(pg, op1, op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    svqrshl_u8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    svqrshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    svqrshl_u8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv8i16")]
        fn _svqrshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svqrshl_u16_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    svqrshl_u16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    svqrshl_u16_m(pg, op1, op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    svqrshl_u16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    svqrshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    svqrshl_u16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv4i32")]
        fn _svqrshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svqrshl_u32_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
    svqrshl_u32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    svqrshl_u32_m(pg, op1, op2)
}
#[doc = 
"Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqrshl_u32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svqrshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqrshl_u32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv2i64")] + fn _svqrshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { 
_svqrshl_u64_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqrshl_u64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svqrshl_u64_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqrshl_u64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svqrshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqrshl_u64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_s16(op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnb.nxv8i16" + )] + fn _svqrshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnb_n_s16(op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_s32(op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnb.nxv4i32" + )] + fn _svqrshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnb_n_s32(op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_s64(op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnb.nxv2i64" + )] + fn _svqrshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnb_n_s64(op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_u16(op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnb.nxv8i16" + )] + fn _svqrshrnb_n_u16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnb_n_u16(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_u32(op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnb.nxv4i32" + )] + fn _svqrshrnb_n_u32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnb_n_u32(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_u64(op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnb.nxv2i64" + )] + fn _svqrshrnb_n_u64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnb_n_u64(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnt.nxv8i16" + )] + fn _svqrshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnt_n_s16(even, op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnt.nxv4i32" + )] + fn _svqrshrnt_n_s32(even: svint16_t, op1: 
svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnt_n_s32(even, op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnt.nxv2i64" + )] + fn _svqrshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnt_n_s64(even, op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnt.nxv8i16" + )] + fn _svqrshrnt_n_u16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnt_n_u16(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> 
svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnt.nxv4i32" + )] + fn _svqrshrnt_n_u32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnt_n_u32(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnt.nxv2i64" + )] + fn _svqrshrnt_n_u64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnt_n_u64(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] +pub fn svqrshrunb_n_s16(op1: svint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunb.nxv8i16" + )] + fn _svqrshrunb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrunb_n_s16(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] +pub fn svqrshrunb_n_s32(op1: svint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunb.nxv4i32" + )] + fn _svqrshrunb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrunb_n_s32(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] +pub fn svqrshrunb_n_s64(op1: svint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunb.nxv2i64" + )] + fn _svqrshrunb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrunb_n_s64(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] +pub fn svqrshrunt_n_s16(even: svuint8_t, op1: svint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunt.nxv8i16" + )] + fn _svqrshrunt_n_s16(even: svint8_t, op1: 
svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrunt_n_s16(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] +pub fn svqrshrunt_n_s32(even: svuint16_t, op1: svint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunt.nxv4i32" + )] + fn _svqrshrunt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrunt_n_s32(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] +pub fn svqrshrunt_n_s64(even: svuint32_t, op1: svint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunt.nxv2i64" + )] + fn _svqrshrunt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrunt_n_s64(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s8_m(pg: svbool_t, 
op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv16i8")] + fn _svqshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqshl_s8_m(pg, op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqshl_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqshl_s8_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqshl_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqshl_s8_m(pg, svsel_s8(pg, op1, 
svdup_n_s8(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqshl_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv8i16")] + fn _svqshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqshl_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqshl_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqshl_s16_m(pg, op1, op2) +} +#[doc = "Saturating 
shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqshl_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqshl_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv4i32")] + fn _svqshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqshl_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating shift 
left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqshl_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqshl_s32_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqshl_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqshl_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv2i64")] + fn _svqshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqshl_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqshl_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqshl_s64_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(sqshl))] +pub fn svqshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqshl_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqshl_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv16i8")] + fn _svqshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(uqshl))]
pub fn svqshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    // "_n" form: splat the scalar shift amount and defer to the vector variant.
    svqshl_u8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    // "Don't care" form: implemented via the merging form.
    svqshl_u8_m(pg, op1, op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    svqshl_u8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    // Zeroing form: inactive lanes of op1 are forced to 0 before the merge.
    svqshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    svqshl_u8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    // The LLVM shim is declared over signed vectors; operands round-trip
    // through as_signed()/as_unsigned(), and pg narrows to 8 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv8i16")]
        fn _svqshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svqshl_u16_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    svqshl_u16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    svqshl_u16_m(pg, op1, op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    svqshl_u16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    svqshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    svqshl_u16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv4i32")]
        fn _svqshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svqshl_u32_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
    svqshl_u32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    svqshl_u32_m(pg, op1, op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
    svqshl_u32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    svqshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
    svqshl_u32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv2i64")]
        fn _svqshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svqshl_u64_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
    svqshl_u64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
    svqshl_u64_m(pg, op1, op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
    svqshl_u64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature =
"stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
    // Zeroing form: inactive lanes of op1 are forced to 0 before the merge.
    svqshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
pub fn svqshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
    svqshl_u64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
// NOTE(review): the `<const IMM2: i32>` generic parameter was stripped by the
// extraction (the body already uses IMM2 and `::<IMM2>` turbofish was left
// empty); restored here per the stdarch generated-code convention.
pub fn svqshlu_n_s8_m<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
    // Shift amount is an immediate, validated at compile time.
    static_assert_range!(IMM2, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv16i8")]
        fn _svqshlu_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t;
    }
    unsafe { _svqshlu_n_s8_m(pg, op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
pub fn svqshlu_n_s8_x<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
    svqshlu_n_s8_m::<IMM2>(pg, op1)
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
pub fn svqshlu_n_s8_z<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
    svqshlu_n_s8_m::<IMM2>(pg, svsel_s8(pg, op1, svdup_n_s8(0)))
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
pub fn svqshlu_n_s16_m<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
    static_assert_range!(IMM2, 0..=15);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv8i16")]
        fn _svqshlu_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t;
    }
    unsafe { _svqshlu_n_s16_m(pg.sve_into(), op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
pub fn svqshlu_n_s16_x<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
    svqshlu_n_s16_m::<IMM2>(pg, op1)
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
pub fn svqshlu_n_s16_z<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
    svqshlu_n_s16_m::<IMM2>(pg, svsel_s16(pg, op1, svdup_n_s16(0)))
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
pub fn svqshlu_n_s32_m<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svuint32_t {
    static_assert_range!(IMM2, 0..=31);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv4i32")]
        fn _svqshlu_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t;
    }
    unsafe { _svqshlu_n_s32_m(pg.sve_into(), op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
pub fn svqshlu_n_s32_x<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svuint32_t {
    svqshlu_n_s32_m::<IMM2>(pg, op1)
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
pub fn svqshlu_n_s32_z<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svuint32_t {
    svqshlu_n_s32_m::<IMM2>(pg, svsel_s32(pg, op1, svdup_n_s32(0)))
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
pub fn svqshlu_n_s64_m<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svuint64_t {
    static_assert_range!(IMM2, 0..=63);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv2i64")]
        fn _svqshlu_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t;
    }
    unsafe { _svqshlu_n_s64_m(pg.sve_into(), op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
pub fn svqshlu_n_s64_x<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svuint64_t {
    svqshlu_n_s64_m::<IMM2>(pg, op1)
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
pub fn svqshlu_n_s64_z<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svuint64_t {
    svqshlu_n_s64_m::<IMM2>(pg, svsel_s64(pg, op1, svdup_n_s64(0)))
}
#[doc = "Saturating shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))]
pub fn svqshrnb_n_s16<const IMM2: i32>(op1: svint16_t) -> svint8_t {
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrnb.nxv8i16"
        )]
        fn _svqshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
    }
    unsafe { _svqshrnb_n_s16(op1, IMM2) }
}
#[doc = "Saturating shift right narrow (bottom)"]
#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))]
// NOTE(review): `<const IMM2: i32>` generic parameters in this family were
// stripped by the extraction (the bodies already use IMM2); restored here
// per the stdarch generated-code convention.
pub fn svqshrnb_n_s32<const IMM2: i32>(op1: svint32_t) -> svint16_t {
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrnb.nxv4i32"
        )]
        fn _svqshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
    }
    unsafe { _svqshrnb_n_s32(op1, IMM2) }
}
#[doc = "Saturating shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))]
pub fn svqshrnb_n_s64<const IMM2: i32>(op1: svint64_t) -> svint32_t {
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrnb.nxv2i64"
        )]
        fn _svqshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
    }
    unsafe { _svqshrnb_n_s64(op1, IMM2) }
}
#[doc = "Saturating shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))]
pub fn svqshrnb_n_u16<const IMM2: i32>(op1: svuint16_t) -> svuint8_t {
    static_assert_range!(IMM2, 1..=8);
    // Unsigned variant: the LLVM shim is declared over signed vectors,
    // so operands round-trip through as_signed()/as_unsigned().
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqshrnb.nxv8i16"
        )]
        fn _svqshrnb_n_u16(op1: svint16_t, imm2: i32) -> svint8_t;
    }
    unsafe { _svqshrnb_n_u16(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Saturating shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))]
pub fn svqshrnb_n_u32<const IMM2: i32>(op1: svuint32_t) -> svuint16_t {
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqshrnb.nxv4i32"
        )]
        fn _svqshrnb_n_u32(op1: svint32_t, imm2: i32) -> svint16_t;
    }
    unsafe { _svqshrnb_n_u32(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Saturating shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))]
pub fn svqshrnb_n_u64<const IMM2: i32>(op1: svuint64_t) -> svuint32_t {
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqshrnb.nxv2i64"
        )]
        fn _svqshrnb_n_u64(op1: svint64_t, imm2: i32) -> svint32_t;
    }
    unsafe { _svqshrnb_n_u64(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Saturating shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))]
pub fn svqshrnt_n_s16<const IMM2: i32>(even: svint8_t, op1: svint16_t) -> svint8_t {
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrnt.nxv8i16"
        )]
        fn _svqshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
    }
    unsafe { _svqshrnt_n_s16(even, op1, IMM2) }
}
#[doc = "Saturating shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))]
pub fn svqshrnt_n_s32<const IMM2: i32>(even: svint16_t, op1: svint32_t) -> svint16_t {
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrnt.nxv4i32"
        )]
        fn _svqshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
    }
    unsafe { _svqshrnt_n_s32(even, op1, IMM2) }
}
#[doc = "Saturating shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))]
pub fn svqshrnt_n_s64<const IMM2: i32>(even: svint32_t, op1: svint64_t) -> svint32_t {
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrnt.nxv2i64"
        )]
        fn _svqshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
    }
    unsafe { _svqshrnt_n_s64(even, op1, IMM2) }
}
#[doc = "Saturating shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))]
pub fn svqshrnt_n_u16<const IMM2: i32>(even: svuint8_t, op1: svuint16_t) -> svuint8_t {
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqshrnt.nxv8i16"
        )]
        fn _svqshrnt_n_u16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
    }
    unsafe { _svqshrnt_n_u16(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Saturating shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))]
pub fn svqshrnt_n_u32<const IMM2: i32>(even: svuint16_t, op1: svuint32_t) -> svuint16_t {
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqshrnt.nxv4i32"
        )]
        fn _svqshrnt_n_u32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
    }
    unsafe { _svqshrnt_n_u32(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Saturating shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))]
pub fn svqshrnt_n_u64<const IMM2: i32>(even: svuint32_t, op1: svuint64_t) -> svuint32_t {
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqshrnt.nxv2i64"
        )]
        fn _svqshrnt_n_u64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
    }
    unsafe { _svqshrnt_n_u64(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Saturating shift right unsigned narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s16])"]
#[inline(always)]
#[target_feature(enable =
"sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))]
// NOTE(review): `<const IMM2: i32>` generic parameters in the svqshrun*
// family were stripped by the extraction (the bodies already use IMM2);
// restored here per the stdarch generated-code convention.
pub fn svqshrunb_n_s16<const IMM2: i32>(op1: svint16_t) -> svuint8_t {
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrunb.nxv8i16"
        )]
        fn _svqshrunb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
    }
    unsafe { _svqshrunb_n_s16(op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift right unsigned narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))]
pub fn svqshrunb_n_s32<const IMM2: i32>(op1: svint32_t) -> svuint16_t {
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrunb.nxv4i32"
        )]
        fn _svqshrunb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
    }
    unsafe { _svqshrunb_n_s32(op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift right unsigned narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))]
pub fn svqshrunb_n_s64<const IMM2: i32>(op1: svint64_t) -> svuint32_t {
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrunb.nxv2i64"
        )]
        fn _svqshrunb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
    }
    unsafe { _svqshrunb_n_s64(op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift right unsigned narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))]
pub fn svqshrunt_n_s16<const IMM2: i32>(even: svuint8_t, op1: svint16_t) -> svuint8_t {
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrunt.nxv8i16"
        )]
        fn _svqshrunt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
    }
    unsafe { _svqshrunt_n_s16(even.as_signed(), op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift right unsigned narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))]
pub fn svqshrunt_n_s32<const IMM2: i32>(even: svuint16_t, op1: svint32_t) -> svuint16_t {
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrunt.nxv4i32"
        )]
        fn _svqshrunt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
    }
    unsafe { _svqshrunt_n_s32(even.as_signed(), op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift right unsigned narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))]
pub fn svqshrunt_n_s64<const IMM2: i32>(even: svuint32_t, op1: svint64_t) -> svuint32_t {
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrunt.nxv2i64"
        )]
        fn _svqshrunt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
    }
    unsafe { _svqshrunt_n_s64(even.as_signed(), op1, IMM2).as_unsigned() }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Merging form: forwards straight to the LLVM SQSUB intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv16i8")]
        fn _svqsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _svqsub_s8_m(pg, op1, op2) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svqsub_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    svqsub_s8_m(pg, op1, op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svqsub_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Zeroing form: inactive lanes of op1 are forced to 0 before the merge.
    svqsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svqsub_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv8i16")]
        fn _svqsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svqsub_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s16_m(pg: svbool_t, op1:
svint16_t, op2: i16) -> svint16_t { + svqsub_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsub_s16_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv4i32")] + fn _svqsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsub_s32_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv2i64")] + fn _svqsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsub_s64_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv16i8")] + fn _svqsub_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsub_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsub_u8_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = 
"Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv8i16")] + fn _svqsub_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsub_u16_m(pg, op1, op2) +} +#[doc = 
"Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv4i32")] + fn _svqsub_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_u32_m(pg.sve_into(), op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsub_u32_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv2i64")] + fn _svqsub_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsub_u64_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_x)"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv16i8")] + fn _svqsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsubr_s8_m(pg, op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsubr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqsubr_s8_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsubr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s8_z(pg: svbool_t, op1: 
svint8_t, op2: i8) -> svint8_t { + svqsubr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv8i16")] + fn _svqsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsubr_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsubr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsubr_s16_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s16_x(pg: svbool_t, 
op1: svint16_t, op2: i16) -> svint16_t { + svqsubr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsubr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv4i32")] + fn _svqsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsubr_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] 
+pub fn svqsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsubr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsubr_s32_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsubr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsubr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv2i64")] + fn _svqsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsubr_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsubr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsubr_s64_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsubr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsubr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv16i8")] + fn _svqsubr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsubr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsubr_u8_m(pg, op1, svdup_n_u8(op2)) 
+} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsubr_u8_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsubr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsubr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_m)"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv8i16")] + fn _svqsubr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsubr_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsubr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsubr_u16_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsubr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsubr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv4i32")] + fn _svqsubr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsubr_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + 
svqsubr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsubr_u32_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsubr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsubr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv2i64")] + fn _svqsubr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsubr_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsubr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsubr_u64_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsubr_u64_x(pg, op1, svdup_n_u64(op2)) +} 
+#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsubr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnb))] +pub fn svqxtnb_s16(op: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv8i16")] + fn _svqxtnb_s16(op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnb_s16(op) } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnb))] +pub fn svqxtnb_s32(op: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv4i32")] 
+ fn _svqxtnb_s32(op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnb_s32(op) } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnb))] +pub fn svqxtnb_s64(op: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv2i64")] + fn _svqxtnb_s64(op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnb_s64(op) } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqxtnb))] +pub fn svqxtnb_u16(op: svuint16_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv8i16")] + fn _svqxtnb_u16(op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnb_u16(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqxtnb))] +pub fn svqxtnb_u32(op: svuint32_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv4i32")] + fn _svqxtnb_u32(op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnb_u32(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqxtnb))] +pub fn svqxtnb_u64(op: svuint64_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv2i64")] + fn _svqxtnb_u64(op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnb_u64(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnt))] +pub fn svqxtnt_s16(even: svint8_t, op: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv8i16")] + fn _svqxtnt_s16(even: svint8_t, op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnt_s16(even, op) } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnt))] +pub fn svqxtnt_s32(even: svint16_t, op: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv4i32")] + fn _svqxtnt_s32(even: svint16_t, op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnt_s32(even, op) } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnt))] +pub fn svqxtnt_s64(even: svint32_t, op: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv2i64")] + fn _svqxtnt_s64(even: svint32_t, op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnt_s64(even, op) } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqxtnt))] +pub fn svqxtnt_u16(even: svuint8_t, op: svuint16_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv8i16")] + fn _svqxtnt_u16(even: svint8_t, op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnt_u16(even.as_signed(), op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqxtnt))] +pub fn svqxtnt_u32(even: svuint16_t, op: svuint32_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv4i32")] + fn _svqxtnt_u32(even: svint16_t, op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnt_u32(even.as_signed(), op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(uqxtnt))] +pub fn svqxtnt_u64(even: svuint32_t, op: svuint64_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv2i64")] + fn _svqxtnt_u64(even: svint32_t, op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnt_u64(even.as_signed(), op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunb))] +pub fn svqxtunb_s16(op: svint16_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunb.nxv8i16" + )] + fn _svqxtunb_s16(op: svint16_t) -> svint8_t; + } + unsafe { _svqxtunb_s16(op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunb))] +pub fn svqxtunb_s32(op: svint32_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunb.nxv4i32" + )] + fn _svqxtunb_s32(op: svint32_t) -> svint16_t; + } + unsafe { _svqxtunb_s32(op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunb))] +pub fn svqxtunb_s64(op: svint64_t) -> svuint32_t { + 
unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunb.nxv2i64" + )] + fn _svqxtunb_s64(op: svint64_t) -> svint32_t; + } + unsafe { _svqxtunb_s64(op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunt))] +pub fn svqxtunt_s16(even: svuint8_t, op: svint16_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunt.nxv8i16" + )] + fn _svqxtunt_s16(even: svint8_t, op: svint16_t) -> svint8_t; + } + unsafe { _svqxtunt_s16(even.as_signed(), op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunt))] +pub fn svqxtunt_s32(even: svuint16_t, op: svint32_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunt.nxv4i32" + )] + fn _svqxtunt_s32(even: svint16_t, op: svint32_t) -> svint16_t; + } + unsafe { _svqxtunt_s32(even.as_signed(), op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunt))] +pub fn svqxtunt_s64(even: svuint32_t, op: svint64_t) -> svuint32_t { + unsafe extern "unadjusted" { + 
#[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunt.nxv2i64" + )] + fn _svqxtunt_s64(even: svint32_t, op: svint64_t) -> svint32_t; + } + unsafe { _svqxtunt_s64(even.as_signed(), op).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnb.nxv8i16" + )] + fn _svraddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svraddhnb_s16(op1, op2) } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svraddhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnb.nxv4i32" + )] + fn _svraddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svraddhnb_s32(op1, op2) } +} +#[doc = "Rounding add narrow high part 
(bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svraddhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnb.nxv2i64" + )] + fn _svraddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svraddhnb_s64(op1, op2) } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svraddhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svraddhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add 
narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svraddhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svraddhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svraddhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svraddhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svraddhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnt.nxv8i16" + )] + fn _svraddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svraddhnt_s16(even, op1, op2) } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svraddhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = 
"aarch64", + link_name = "llvm.aarch64.sve.raddhnt.nxv4i32" + )] + fn _svraddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svraddhnt_s32(even, op1, op2) } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svraddhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnt.nxv2i64" + )] + fn _svraddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svraddhnt_s64(even, op1, op2) } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svraddhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u16])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svraddhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svraddhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svraddhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svraddhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u64])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svraddhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svraddhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise rotate left by 1 and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrax1[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-sha3")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rax1))] +pub fn svrax1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rax1")] + fn _svrax1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrax1_s64(op1, op2) } +} +#[doc = "Bitwise rotate left by 1 and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrax1[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-sha3")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rax1))] +pub fn svrax1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svrax1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal estimate"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urecpe))] +pub fn svrecpe_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urecpe.nxv4i32")] + fn _svrecpe_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrecpe_u32_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urecpe))] +pub fn svrecpe_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrecpe_u32_m(op, pg, op) +} +#[doc = "Reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urecpe))] +pub fn svrecpe_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrecpe_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.srhadd.nxv16i8")] + fn _svrhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrhadd_s8_m(pg, op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrhadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrhadd_s8_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrhadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrhadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrhadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv8i16")] + fn _svrhadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrhadd_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrhadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrhadd_s16_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrhadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrhadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrhadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv4i32")] + fn _svrhadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svrhadd_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrhadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svrhadd_s32_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrhadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svrhadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrhadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv2i64")] + fn _svrhadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrhadd_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrhadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrhadd_s64_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrhadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrhadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrhadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv16i8")] + fn _svrhadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrhadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svrhadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svrhadd_u8_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svrhadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svrhadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svrhadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Rounding halving add"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv8i16")] + fn _svrhadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrhadd_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svrhadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svrhadd_u16_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svrhadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = 
"Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svrhadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svrhadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv4i32")] + fn _svrhadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svrhadd_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { 
+ svrhadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svrhadd_u32_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svrhadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svrhadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svrhadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv2i64")] + fn _svrhadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrhadd_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svrhadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svrhadd_u64_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svrhadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svrhadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svrhadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv16i8")] + fn _svrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrshl_s8_m(pg, op1, op2) } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrshl_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrshl_s8_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrshl_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrshl_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] 
+pub fn svrshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv8i16")] + fn _svrshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrshl_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrshl_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrshl_s16_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrshl_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s16_z(pg: svbool_t, op1: 
svint16_t, op2: svint16_t) -> svint16_t {
    svrshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svrshl_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Merging form: inactive lanes keep op1; dispatches straight to the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv4i32")]
        fn _svrshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svrshl_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svrshl_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // "Don't care" form: reuses the merging form since inactive lanes may hold anything.
    svrshl_s32_m(pg, op1, op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svrshl_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Zeroing form: inactive lanes of op1 are forced to 0 before the merging shift.
    svrshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svrshl_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv2i64")]
        fn _svrshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svrshl_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svrshl_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svrshl_s64_m(pg, op1, op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svrshl_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svrshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svrshl_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    // Unsigned variant routes through the signed LLVM signature; bit pattern is preserved.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv16i8")]
        fn _svrshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _svrshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    svrshl_u8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    svrshl_u8_m(pg, op1, op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    svrshl_u8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    svrshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    svrshl_u8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv8i16")]
        fn _svrshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svrshl_u16_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    svrshl_u16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    svrshl_u16_m(pg, op1, op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    svrshl_u16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    svrshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    svrshl_u16_z(pg, op1, svdup_n_s16(op2))
}
+#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv4i32")] + fn _svrshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svrshl_u32_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svrshl_u32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svrshl_u32_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svrshl_u32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = 
"Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svrshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svrshl_u32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv2i64")] + fn _svrshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrshl_u64_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svrshl_u64_m(pg, op1, 
svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svrshl_u64_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svrshl_u64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svrshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svrshl_u64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s8_m(pg: svbool_t, op1: svint8_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv16i8")] + fn _svrshr_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t; + } + unsafe { _svrshr_n_s8_m(pg, op1, IMM2) } +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s8_x(pg: svbool_t, op1: svint8_t) -> svint8_t { + svrshr_n_s8_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s8_z(pg: svbool_t, op1: svint8_t) -> svint8_t { + svrshr_n_s8_m::(pg, svsel_s8(pg, op1, svdup_n_s8(0))) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s16_m(pg: svbool_t, op1: svint16_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv8i16")] + fn _svrshr_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t; + } + unsafe { 
_svrshr_n_s16_m(pg.sve_into(), op1, IMM2) } +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s16_x(pg: svbool_t, op1: svint16_t) -> svint16_t { + svrshr_n_s16_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s16_z(pg: svbool_t, op1: svint16_t) -> svint16_t { + svrshr_n_s16_m::(pg, svsel_s16(pg, op1, svdup_n_s16(0))) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s32_m(pg: svbool_t, op1: svint32_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv4i32")] + fn _svrshr_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t; + } + unsafe { _svrshr_n_s32_m(pg.sve_into(), op1, IMM2) } +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s32_x(pg: svbool_t, op1: svint32_t) -> 
svint32_t { + svrshr_n_s32_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s32_z(pg: svbool_t, op1: svint32_t) -> svint32_t { + svrshr_n_s32_m::(pg, svsel_s32(pg, op1, svdup_n_s32(0))) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s64_m(pg: svbool_t, op1: svint64_t) -> svint64_t { + static_assert_range!(IMM2, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv2i64")] + fn _svrshr_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t; + } + unsafe { _svrshr_n_s64_m(pg.sve_into(), op1, IMM2) } +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s64_x(pg: svbool_t, op1: svint64_t) -> svint64_t { + svrshr_n_s64_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s64_z(pg: svbool_t, op1: svint64_t) -> 
svint64_t { + svrshr_n_s64_m::(pg, svsel_s64(pg, op1, svdup_n_s64(0))) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u8_m(pg: svbool_t, op1: svuint8_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv16i8")] + fn _svrshr_n_u8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t; + } + unsafe { _svrshr_n_u8_m(pg, op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u8_x(pg: svbool_t, op1: svuint8_t) -> svuint8_t { + svrshr_n_u8_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u8_z(pg: svbool_t, op1: svuint8_t) -> svuint8_t { + svrshr_n_u8_m::(pg, svsel_u8(pg, op1, svdup_n_u8(0))) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u16_m(pg: 
svbool_t, op1: svuint16_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv8i16")] + fn _svrshr_n_u16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t; + } + unsafe { _svrshr_n_u16_m(pg.sve_into(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u16_x(pg: svbool_t, op1: svuint16_t) -> svuint16_t { + svrshr_n_u16_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u16_z(pg: svbool_t, op1: svuint16_t) -> svuint16_t { + svrshr_n_u16_m::(pg, svsel_u16(pg, op1, svdup_n_u16(0))) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u32_m(pg: svbool_t, op1: svuint32_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv4i32")] + fn _svrshr_n_u32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t; + } + unsafe { _svrshr_n_u32_m(pg.sve_into(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Rounding shift right"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u32_x(pg: svbool_t, op1: svuint32_t) -> svuint32_t { + svrshr_n_u32_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u32_z(pg: svbool_t, op1: svuint32_t) -> svuint32_t { + svrshr_n_u32_m::(pg, svsel_u32(pg, op1, svdup_n_u32(0))) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u64_m(pg: svbool_t, op1: svuint64_t) -> svuint64_t { + static_assert_range!(IMM2, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv2i64")] + fn _svrshr_n_u64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t; + } + unsafe { _svrshr_n_u64_m(pg.sve_into(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u64_x(pg: svbool_t, op1: svuint64_t) -> svuint64_t { + svrshr_n_u64_m::(pg, op1) +} 
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
pub fn svrshr_n_u64_z<const IMM2: i32>(pg: svbool_t, op1: svuint64_t) -> svuint64_t {
    // Zeroing form: inactive lanes of op1 are forced to 0 before the merging shift.
    svrshr_n_u64_m::<IMM2>(pg, svsel_u64(pg, op1, svdup_n_u64(0)))
}
#[doc = "Rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
pub fn svrshrnb_n_s16<const IMM2: i32>(op1: svint16_t) -> svint8_t {
    // IMM2 is the immediate shift amount, checked at compile time.
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv8i16")]
        fn _svrshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
    }
    unsafe { _svrshrnb_n_s16(op1, IMM2) }
}
#[doc = "Rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
pub fn svrshrnb_n_s32<const IMM2: i32>(op1: svint32_t) -> svint16_t {
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv4i32")]
        fn _svrshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
    }
    unsafe { _svrshrnb_n_s32(op1, IMM2) }
}
#[doc = "Rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
pub fn svrshrnb_n_s64<const IMM2: i32>(op1: svint64_t) -> svint32_t {
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv2i64")]
        fn _svrshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
    }
    unsafe { _svrshrnb_n_s64(op1, IMM2) }
}
#[doc = "Rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
pub fn svrshrnb_n_u16<const IMM2: i32>(op1: svuint16_t) -> svuint8_t {
    // Unsigned variant routes through the signed implementation; bit pattern is preserved.
    static_assert_range!(IMM2, 1..=8);
    unsafe { svrshrnb_n_s16::<IMM2>(op1.as_signed()).as_unsigned() }
}
#[doc = "Rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
pub fn svrshrnb_n_u32<const IMM2: i32>(op1: svuint32_t) -> svuint16_t {
    static_assert_range!(IMM2, 1..=16);
    unsafe { svrshrnb_n_s32::<IMM2>(op1.as_signed()).as_unsigned() }
}
#[doc = "Rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
pub fn svrshrnb_n_u64<const IMM2: i32>(op1: svuint64_t) -> svuint32_t {
    static_assert_range!(IMM2, 1..=32);
    unsafe { svrshrnb_n_s64::<IMM2>(op1.as_signed()).as_unsigned() }
}
#[doc = "Rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
pub fn svrshrnt_n_s16<const IMM2: i32>(even: svint8_t, op1: svint16_t) -> svint8_t {
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv8i16")]
        fn _svrshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
    }
    unsafe { _svrshrnt_n_s16(even, op1, IMM2) }
}
#[doc = "Rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
pub fn svrshrnt_n_s32<const IMM2: i32>(even: svint16_t, op1: svint32_t) -> svint16_t {
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv4i32")]
        fn _svrshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
    }
    unsafe { _svrshrnt_n_s32(even, op1, IMM2) }
}
#[doc = "Rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
pub fn svrshrnt_n_s64<const IMM2: i32>(even: svint32_t, op1: svint64_t) -> svint32_t {
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv2i64")]
        fn _svrshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
    }
    unsafe { _svrshrnt_n_s64(even, op1, IMM2) }
}
#[doc = "Rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
pub fn svrshrnt_n_u16<const IMM2: i32>(even: svuint8_t, op1: svuint16_t) -> svuint8_t {
    static_assert_range!(IMM2, 1..=8);
    unsafe { svrshrnt_n_s16::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
}
#[doc = "Rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
pub fn svrshrnt_n_u32<const IMM2: i32>(even: svuint16_t, op1: svuint32_t) -> svuint16_t {
    static_assert_range!(IMM2, 1..=16);
    unsafe { svrshrnt_n_s32::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
}
#[doc = "Rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
pub fn svrshrnt_n_u64<const IMM2: i32>(even: svuint32_t, op1: svuint64_t) -> svuint32_t {
    static_assert_range!(IMM2, 1..=32);
    unsafe { svrshrnt_n_s64::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
}
#[doc = "Reciprocal square root estimate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ursqrte))]
pub fn svrsqrte_u32_m(inactive:
svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ursqrte.nxv4i32" + )] + fn _svrsqrte_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrsqrte_u32_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursqrte))] +pub fn svrsqrte_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrsqrte_u32_m(op, pg, op) +} +#[doc = "Reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursqrte))] +pub fn svrsqrte_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrsqrte_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv16i8")] + fn _svrsra_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svrsra_n_s8(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv8i16")] + fn _svrsra_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svrsra_n_s16(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv4i32")] + fn _svrsra_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svrsra_n_s32(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv2i64")] + fn _svrsra_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svrsra_n_s64(op1, op2, IMM3) } +} 
+#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv16i8")] + fn _svrsra_n_u8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svrsra_n_u8(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv8i16")] + fn _svrsra_n_u16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svrsra_n_u16(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.ursra.nxv4i32")] + fn _svrsra_n_u32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svrsra_n_u32(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv2i64")] + fn _svrsra_n_u64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svrsra_n_u64(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnb.nxv8i16" + )] + fn _svrsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svrsubhnb_s16(op1, op2) } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svrsubhnb_s16(op1, 
svdup_n_s16(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnb.nxv4i32" + )] + fn _svrsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svrsubhnb_s32(op1, op2) } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svrsubhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnb.nxv2i64" + )] + fn _svrsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svrsubhnb_s64(op1, op2) } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svrsubhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svrsubhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svrsubhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svrsubhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn 
svrsubhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svrsubhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svrsubhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svrsubhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnt.nxv8i16" + )] + fn _svrsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svrsubhnt_s16(even, op1, op2) } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svrsubhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnt.nxv4i32" + )] + fn _svrsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svrsubhnt_s32(even, op1, op2) } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svrsubhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnt.nxv2i64" + )] + fn _svrsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { 
_svrsubhnt_s64(even, op1, op2) } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svrsubhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svrsubhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svrsubhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svrsubhnt_s32(even.as_signed(), op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svrsubhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svrsubhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svrsubhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.sbclb.nxv4i32")] + fn _svsbclb_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svsbclb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svsbclb_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclb.nxv2i64")] + fn _svsbclb_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svsbclb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svsbclb_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Subtract with borrow long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_u32])"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclt))] +pub fn svsbclt_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclt.nxv4i32")] + fn _svsbclt_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svsbclt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Subtract with borrow long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclt))] +pub fn svsbclt_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svsbclt_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Subtract with borrow long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclt))] +pub fn svsbclt_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclt.nxv2i64")] + fn _svsbclt_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svsbclt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Subtract with borrow long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(sbclt))] +pub fn svsbclt_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svsbclt_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Shift left long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))] +pub fn svshllb_n_s16(op1: svint8_t) -> svint16_t { + static_assert_range!(IMM2, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv8i16")] + fn _svshllb_n_s16(op1: svint8_t, imm2: i32) -> svint16_t; + } + unsafe { _svshllb_n_s16(op1, IMM2) } +} +#[doc = "Shift left long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))] +pub fn svshllb_n_s32(op1: svint16_t) -> svint32_t { + static_assert_range!(IMM2, 0..=15); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv4i32")] + fn _svshllb_n_s32(op1: svint16_t, imm2: i32) -> svint32_t; + } + unsafe { _svshllb_n_s32(op1, IMM2) } +} +#[doc = "Shift left long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))] +pub fn svshllb_n_s64(op1: svint32_t) -> svint64_t { + static_assert_range!(IMM2, 0..=31); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv2i64")] + fn 
_svshllb_n_s64(op1: svint32_t, imm2: i32) -> svint64_t; + } + unsafe { _svshllb_n_s64(op1, IMM2) } +} +#[doc = "Shift left long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))] +pub fn svshllb_n_u16(op1: svuint8_t) -> svuint16_t { + static_assert_range!(IMM2, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv8i16")] + fn _svshllb_n_u16(op1: svint8_t, imm2: i32) -> svint16_t; + } + unsafe { _svshllb_n_u16(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Shift left long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))] +pub fn svshllb_n_u32(op1: svuint16_t) -> svuint32_t { + static_assert_range!(IMM2, 0..=15); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv4i32")] + fn _svshllb_n_u32(op1: svint16_t, imm2: i32) -> svint32_t; + } + unsafe { _svshllb_n_u32(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Shift left long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))] +pub fn svshllb_n_u64(op1: svuint32_t) -> svuint64_t { + static_assert_range!(IMM2, 0..=31); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv2i64")] + fn 
_svshllb_n_u64(op1: svint32_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svshllb_n_u64(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Shift left long (top)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s16])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))]
+pub fn svshllt_n_s16<const IMM2: i32>(op1: svint8_t) -> svint16_t {
+    static_assert_range!(IMM2, 0..=7);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv8i16")]
+        fn _svshllt_n_s16(op1: svint8_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svshllt_n_s16(op1, IMM2) }
+}
+#[doc = "Shift left long (top)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))]
+pub fn svshllt_n_s32<const IMM2: i32>(op1: svint16_t) -> svint32_t {
+    static_assert_range!(IMM2, 0..=15);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv4i32")]
+        fn _svshllt_n_s32(op1: svint16_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svshllt_n_s32(op1, IMM2) }
+}
+#[doc = "Shift left long (top)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))]
+pub fn svshllt_n_s64<const IMM2: i32>(op1: svint32_t) -> svint64_t {
+    static_assert_range!(IMM2, 0..=31);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv2i64")]
+        fn _svshllt_n_s64(op1: svint32_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svshllt_n_s64(op1, IMM2) }
+}
+#[doc = "Shift left long (top)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u16])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))]
+pub fn svshllt_n_u16<const IMM2: i32>(op1: svuint8_t) -> svuint16_t {
+    static_assert_range!(IMM2, 0..=7);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv8i16")]
+        fn _svshllt_n_u16(op1: svint8_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svshllt_n_u16(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Shift left long (top)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))]
+pub fn svshllt_n_u32<const IMM2: i32>(op1: svuint16_t) -> svuint32_t {
+    static_assert_range!(IMM2, 0..=15);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv4i32")]
+        fn _svshllt_n_u32(op1: svint16_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svshllt_n_u32(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Shift left long (top)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))]
+pub fn svshllt_n_u64<const IMM2: i32>(op1: svuint32_t) -> svuint64_t {
+    static_assert_range!(IMM2, 0..=31);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv2i64")]
+        fn _svshllt_n_u64(op1: svint32_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svshllt_n_u64(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Shift right narrow (bottom)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s16])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
+pub fn svshrnb_n_s16<const IMM2: i32>(op1: svint16_t) -> svint8_t {
+    static_assert_range!(IMM2, 1..=8);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv8i16")]
+        fn _svshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svshrnb_n_s16(op1, IMM2) }
+}
+#[doc = "Shift right narrow (bottom)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
+pub fn svshrnb_n_s32<const IMM2: i32>(op1: svint32_t) -> svint16_t {
+    static_assert_range!(IMM2, 1..=16);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv4i32")]
+        fn _svshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svshrnb_n_s32(op1, IMM2) }
+}
+#[doc = "Shift right narrow (bottom)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
+pub fn svshrnb_n_s64<const IMM2: i32>(op1: svint64_t) -> svint32_t {
+    static_assert_range!(IMM2, 1..=32);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv2i64")]
+        fn _svshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svshrnb_n_s64(op1, IMM2) }
+}
+} +#[doc = "Shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] +pub fn svshrnb_n_u16(op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe { svshrnb_n_s16::(op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] +pub fn svshrnb_n_u32(op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe { svshrnb_n_s32::(op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] +pub fn svshrnb_n_u64(op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe { svshrnb_n_s64::(op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.shrnt.nxv8i16")] + fn _svshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svshrnt_n_s16(even, op1, IMM2) } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv4i32")] + fn _svshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svshrnt_n_s32(even, op1, IMM2) } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv2i64")] + fn _svshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svshrnt_n_s64(even, op1, IMM2) } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe { 
svshrnt_n_s16::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe { svshrnt_n_s32::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe { svshrnt_n_s64::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv16i8")] + fn _svsli_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svsli_n_s8(op1, op2, IMM3) } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 0..=15); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv8i16")] + fn _svsli_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svsli_n_s16(op1, op2, IMM3) } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 0..=31); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv4i32")] + fn _svsli_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svsli_n_s32(op1, op2, IMM3) } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 0..=63); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv2i64")] + fn _svsli_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svsli_n_s64(op1, op2, IMM3) } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 0..=7); + unsafe { svsli_n_s8::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 0..=15); + unsafe { svsli_n_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 0..=31); + unsafe { svsli_n_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 0..=63); + unsafe { svsli_n_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "SM4 encryption and decryption"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsm4e[_u32])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2,sve2-sm4")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sm4e))] +pub fn svsm4e_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sm4e")] + fn _svsm4e_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsm4e_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "SM4 key updates"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsm4ekey[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-sm4")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sm4ekey))] +pub fn svsm4ekey_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sm4ekey")] + fn _svsm4ekey_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsm4ekey_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv16i8")] + fn _svsqadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsqadd_u8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svsqadd_u8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svsqadd_u8_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svsqadd_u8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svsqadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + 
svsqadd_u8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv8i16")] + fn _svsqadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsqadd_u16_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svsqadd_u16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svsqadd_u16_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn 
svsqadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svsqadd_u16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svsqadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svsqadd_u16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv4i32")] + fn _svsqadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsqadd_u32_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svsqadd_u32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svsqadd_u32_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svsqadd_u32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svsqadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> 
svuint32_t { + svsqadd_u32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv2i64")] + fn _svsqadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsqadd_u64_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svsqadd_u64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svsqadd_u64_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub 
fn svsqadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svsqadd_u64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svsqadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svsqadd_u64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] +pub fn svsra_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv16i8")] + fn _svsra_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svsra_n_s8(op1, op2, IMM3) } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] +pub fn svsra_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv8i16")] + fn _svsra_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svsra_n_s16(op1, op2, IMM3) } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] +pub fn svsra_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv4i32")] + fn _svsra_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svsra_n_s32(op1, op2, IMM3) } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] +pub fn svsra_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv2i64")] + fn _svsra_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svsra_n_s64(op1, op2, IMM3) } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(usra, IMM3 = 1))] +pub fn svsra_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv16i8")] + fn _svsra_n_u8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svsra_n_u8(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usra, IMM3 = 1))] +pub fn svsra_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv8i16")] + fn _svsra_n_u16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svsra_n_u16(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usra, IMM3 = 1))] +pub fn svsra_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv4i32")] + fn _svsra_n_u32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svsra_n_u32(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usra, IMM3 = 1))] +pub fn svsra_n_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv2i64")] + fn _svsra_n_u64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svsra_n_u64(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv16i8")] + fn _svsri_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svsri_n_s8(op1, op2, IMM3) } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv8i16")] + fn _svsri_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svsri_n_s16(op1, op2, IMM3) } +} +#[doc = "Shift right 
and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv4i32")] + fn _svsri_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svsri_n_s32(op1, op2, IMM3) } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv2i64")] + fn _svsri_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svsri_n_s64(op1, op2, IMM3) } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe { svsri_n_s8::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u16])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe { svsri_n_s16::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe { svsri_n_s32::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe { svsri_n_s64::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svint64_t, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2f64" + )] + fn _svstnt1_scatter_s64index_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + indices: svint64_t, + ); + } + _svstnt1_scatter_s64index_f64(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i64" + )] + fn _svstnt1_scatter_s64index_s64( + data: svint64_t, + pg: svbool2_t, + base: 
*mut i64, + indices: svint64_t, + ); + } + _svstnt1_scatter_s64index_s64(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svuint64_t, + data: svfloat64_t, +) { + svstnt1_scatter_s64index_f64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svuint64_t, + data: svint64_t, +) { + svstnt1_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required 
for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svuint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svint64_t, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2f64" + )] + fn _svstnt1_scatter_s64offset_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + offsets: svint64_t, + ); + } + _svstnt1_scatter_s64offset_f64(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i64" + )] + fn _svstnt1_scatter_s64offset_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + offsets: svint64_t, + ); + } + _svstnt1_scatter_s64offset_s64(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32offset_f32( + pg: svbool_t, + base: *mut f32, + offsets: svuint32_t, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4f32" + )] + fn _svstnt1_scatter_u32offset_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + offsets: svint32_t, + ); + } + _svstnt1_scatter_u32offset_f32(data, pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i32, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i32" + )] + fn _svstnt1_scatter_u32offset_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + offsets: svint32_t, + ); + } + _svstnt1_scatter_u32offset_s32(data, pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u32, + offsets: svuint32_t, + data: svuint32_t, +) { + svstnt1_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svuint64_t, + data: svfloat64_t, +) { + svstnt1_scatter_s64offset_f64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This 
dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_f32(pg: svbool_t, bases: svuint32_t, data: svfloat32_t) { + svstnt1_scatter_u32base_offset_f32(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svstnt1_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svstnt1_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_f64(pg: svbool_t, bases: svuint64_t, data: svfloat64_t) { + svstnt1_scatter_u64base_offset_f64(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svstnt1_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svstnt1_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` 
cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svfloat32_t, +) { + svstnt1_scatter_u32base_offset_f32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + svstnt1_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(2), data) +} 
+#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svstnt1_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have 
special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svfloat64_t, +) { + svstnt1_scatter_u64base_offset_f64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svstnt1_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svstnt1_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be 
required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svstnt1_scatter_u32base_offset_f32( + data: svfloat32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1_scatter_u32base_offset_f32(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: 
i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svstnt1_scatter_u32base_offset_s32( + data: svint32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1_scatter_u32base_offset_s32(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svstnt1_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svstnt1_scatter_u64base_offset_f64( + data: svfloat64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1_scatter_u64base_offset_f64(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory 
ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svstnt1_scatter_u64base_offset_s64( + data: svint64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1_scatter_u64base_offset_s64(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_offset_u64( + pg: 
svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svstnt1_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i8" + )] + fn _svstnt1b_scatter_s64offset_s64( + data: nxv2i8, + pg: svbool2_t, + base: *mut i8, + offsets: svint64_t, + ); + } + _svstnt1b_scatter_s64offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for 
each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i16" + )] + fn _svstnt1h_scatter_s64offset_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + offsets: svint64_t, + ); + } + _svstnt1h_scatter_s64offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svint64_t, + data: svint64_t, +) { + 
unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i32" + )] + fn _svstnt1w_scatter_s64offset_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + offsets: svint64_t, + ); + } + _svstnt1w_scatter_s64offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1b_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * 
Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i8, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i8" + )] + fn _svstnt1b_scatter_u32offset_s32( + data: nxv4i8, + pg: svbool4_t, + base: *mut i8, + offsets: svint32_t, + ); + } + _svstnt1b_scatter_u32offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets.as_signed(), + ) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i16, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i16" + )] + fn _svstnt1h_scatter_u32offset_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + offsets: svint32_t, + ); + } + _svstnt1h_scatter_u32offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets.as_signed(), + ) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u8, + offsets: svuint32_t, + data: svuint32_t, +) { + svstnt1b_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u16, + offsets: svuint32_t, + data: svuint32_t, +) { + svstnt1h_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1b_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} 
+#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1h_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1w_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1b_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for 
each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svstnt1b_scatter_u32base_offset_s32( + data: nxv4i8, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1b_scatter_u32base_offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * 
Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svstnt1h_scatter_u32base_offset_s32( + data: nxv4i16, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1h_scatter_u32base_offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base]_offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(stnt1b))]
pub unsafe fn svstnt1b_scatter_u32base_offset_u32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
    data: svuint32_t,
) {
    // Stores are bitwise, so reinterpret the unsigned lanes as signed and delegate.
    svstnt1b_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed())
}
#[doc = "Truncate to 16 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_offset[_u32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1h_scatter_u32base_offset_u32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
    data: svuint32_t,
) {
    // Stores are bitwise, so reinterpret the unsigned lanes as signed and delegate.
    svstnt1h_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed())
}
#[doc = "Truncate to 8 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base]_offset[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1b))]
pub unsafe fn svstnt1b_scatter_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
    data: svint64_t,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i8.nxv2i64"
        )]
        fn _svstnt1b_scatter_u64base_offset_s64(
            data: nxv2i8,
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        );
    }
    // `simd_cast` truncates each 64-bit lane to 8 bits before the scatter store;
    // SAFETY: the caller upholds the documented address/provenance requirements.
    _svstnt1b_scatter_u64base_offset_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        bases.as_signed(),
        offset,
    )
}
#[doc = "Truncate to 16 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_offset[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1h_scatter_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
    data: svint64_t,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i16.nxv2i64"
        )]
        fn _svstnt1h_scatter_u64base_offset_s64(
            data: nxv2i16,
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        );
    }
    // `simd_cast` truncates each 64-bit lane to 16 bits before the scatter store;
    // SAFETY: the caller upholds the documented address/provenance requirements.
    _svstnt1h_scatter_u64base_offset_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        bases.as_signed(),
        offset,
    )
}
#[doc = "Truncate to 32 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_offset[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1w_scatter_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
    data: svint64_t,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i32.nxv2i64"
        )]
        fn _svstnt1w_scatter_u64base_offset_s64(
            data: nxv2i32,
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        );
    }
    // `simd_cast` truncates each 64-bit lane to 32 bits before the scatter store;
    // SAFETY: the caller upholds the documented address/provenance requirements.
    _svstnt1w_scatter_u64base_offset_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        bases.as_signed(),
        offset,
    )
}
#[doc = "Truncate to 8 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base]_offset[_u64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1b))]
pub unsafe fn svstnt1b_scatter_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
    data: svuint64_t,
) {
    // Stores are bitwise, so reinterpret the unsigned lanes as signed and delegate.
    svstnt1b_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
}
#[doc = "Truncate to 16 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_offset[_u64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1h_scatter_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
    data: svuint64_t,
) {
    // Stores are bitwise, so reinterpret the unsigned lanes as signed and delegate.
    svstnt1h_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
}
#[doc = "Truncate to 32 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_offset[_u64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1w_scatter_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
    data: svuint64_t,
) {
    // Stores are bitwise, so reinterpret the unsigned lanes as signed and delegate.
    svstnt1w_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
}
#[doc = "Truncate to 8 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base_s32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1b))]
pub unsafe fn svstnt1b_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) {
    // Offset 0: store directly at each base address.
    svstnt1b_scatter_u32base_offset_s32(pg, bases, 0, data)
}
#[doc = "Truncate to 16 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base_s32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1h_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) {
    // Offset 0: store directly at each base address.
    svstnt1h_scatter_u32base_offset_s32(pg, bases, 0, data)
}
#[doc = "Truncate to 8 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base_u32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1b))]
pub unsafe fn svstnt1b_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) {
    // Offset 0: store directly at each base address.
    svstnt1b_scatter_u32base_offset_u32(pg, bases, 0, data)
}
#[doc = "Truncate to 16 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base_u32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1h_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) {
    // Offset 0: store directly at each base address.
    svstnt1h_scatter_u32base_offset_u32(pg, bases, 0, data)
}
#[doc = "Truncate to 8 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1b))]
pub unsafe fn svstnt1b_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
    // Offset 0: store directly at each base address.
    svstnt1b_scatter_u64base_offset_s64(pg, bases, 0, data)
}
#[doc = "Truncate to 16 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1h_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
    // Offset 0: store directly at each base address.
    svstnt1h_scatter_u64base_offset_s64(pg, bases, 0, data)
}
#[doc = "Truncate to 32 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1w_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
    // Offset 0: store directly at each base address.
    svstnt1w_scatter_u64base_offset_s64(pg, bases, 0, data)
}
#[doc = "Truncate to 8 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base_u64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1b))]
pub unsafe fn svstnt1b_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
    // Offset 0: store directly at each base address.
    svstnt1b_scatter_u64base_offset_u64(pg, bases, 0, data)
}
#[doc = "Truncate to 16 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base_u64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1h_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
    // Offset 0: store directly at each base address.
    svstnt1h_scatter_u64base_offset_u64(pg, bases, 0, data)
}
#[doc = "Truncate to 32 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base_u64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1w_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
    // Offset 0: store directly at each base address.
    svstnt1w_scatter_u64base_offset_u64(pg, bases, 0, data)
}
#[doc = "Truncate to 16 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]index[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1h_scatter_s64index_s64(
    pg: svbool_t,
    base: *mut i16,
    indices: svint64_t,
    data: svint64_t,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i16"
        )]
        fn _svstnt1h_scatter_s64index_s64(
            data: nxv2i16,
            pg: svbool2_t,
            base: *mut i16,
            indices: svint64_t,
        );
    }
    // `simd_cast` truncates each 64-bit lane to 16 bits before the scatter store;
    // SAFETY: the caller upholds the documented address requirements.
    _svstnt1h_scatter_s64index_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        indices,
    )
}
#[doc = "Truncate to 32 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]index[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1w_scatter_s64index_s64(
    pg: svbool_t,
    base: *mut i32,
    indices: svint64_t,
    data: svint64_t,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i32"
        )]
        fn _svstnt1w_scatter_s64index_s64(
            data: nxv2i32,
            pg: svbool2_t,
            base: *mut i32,
            indices: svint64_t,
        );
    }
    // `simd_cast` truncates each 64-bit lane to 32 bits before the scatter store;
    // SAFETY: the caller upholds the documented address requirements.
    _svstnt1w_scatter_s64index_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        indices,
    )
}
#[doc = "Truncate to 16 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]index[_u64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1h_scatter_s64index_u64(
    pg: svbool_t,
    base: *mut u16,
    indices: svint64_t,
    data: svuint64_t,
) {
    // Stores are bitwise, so reinterpret the unsigned pointer/data as signed and delegate.
    svstnt1h_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed())
}
#[doc = "Truncate to 32 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]index[_u64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1w_scatter_s64index_u64(
    pg: svbool_t,
    base: *mut u32,
    indices: svint64_t,
    data: svuint64_t,
) {
    // Stores are bitwise, so reinterpret the unsigned pointer/data as signed and delegate.
    svstnt1w_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed())
}
#[doc = "Truncate to 16 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]index[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1h_scatter_u64index_s64(
    pg: svbool_t,
    base: *mut i16,
    indices: svuint64_t,
    data: svint64_t,
) {
    // Index reinterpretation is bitwise; delegate to the signed-index variant.
    svstnt1h_scatter_s64index_s64(pg, base, indices.as_signed(), data)
}
#[doc = "Truncate to 32 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]index[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1w_scatter_u64index_s64(
    pg: svbool_t,
    base: *mut i32,
    indices: svuint64_t,
    data: svint64_t,
) {
    // Index reinterpretation is bitwise; delegate to the signed-index variant.
    svstnt1w_scatter_s64index_s64(pg, base, indices.as_signed(), data)
}
#[doc = "Truncate to 16 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]index[_u64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1h_scatter_u64index_u64(
    pg: svbool_t,
    base: *mut u16,
    indices: svuint64_t,
    data: svuint64_t,
) {
    // All reinterpretations are bitwise; delegate to the fully signed variant.
    svstnt1h_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed())
}
#[doc = "Truncate to 32 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]index[_u64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1w_scatter_u64index_u64(
    pg: svbool_t,
    base: *mut u32,
    indices: svuint64_t,
    data: svuint64_t,
) {
    // All reinterpretations are bitwise; delegate to the fully signed variant.
    svstnt1w_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed())
}
#[doc = "Truncate to 16 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_index[_s32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1h_scatter_u32base_index_s32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
    data: svint32_t,
) {
    // Scale the element index to a byte offset (index << 1 == index * 2-byte elements);
    // `unchecked_shl` relies on the caller's index being in range.
    svstnt1h_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(1), data)
}
#[doc = "Truncate to 16 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_index[_u32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1h_scatter_u32base_index_u32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
    data: svuint32_t,
) {
    // Scale the element index to a byte offset (index << 1 == index * 2-byte elements);
    // `unchecked_shl` relies on the caller's index being in range.
    svstnt1h_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(1), data)
}
#[doc = "Truncate to 16 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_index[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1h_scatter_u64base_index_s64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
    data: svint64_t,
) {
    // Scale the element index to a byte offset (index << 1 == index * 2-byte elements);
    // `unchecked_shl` relies on the caller's index being in range.
    svstnt1h_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(1), data)
}
#[doc = "Truncate to 32 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_index[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1w_scatter_u64base_index_s64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
    data: svint64_t,
) {
    // Scale the element index to a byte offset (index << 2 == index * 4-byte elements);
    // `unchecked_shl` relies on the caller's index being in range.
    svstnt1w_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(2), data)
}
+#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svstnt1h_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * 
Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svstnt1w_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv8i16")] + fn _svsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svsubhnb_s16(op1, op2) } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svsubhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] 
+pub fn svsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv4i32")] + fn _svsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svsubhnb_s32(op1, op2) } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svsubhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv2i64")] + fn _svsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svsubhnb_s64(op1, op2) } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svsubhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u16])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svsubhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svsubhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svsubhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svsubhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_u64(op1: svuint64_t, op2: 
svuint64_t) -> svuint32_t { + unsafe { svsubhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svsubhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv8i16")] + fn _svsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svsubhnt_s16(even, op1, op2) } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svsubhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn 
svsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv4i32")] + fn _svsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svsubhnt_s32(even, op1, op2) } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svsubhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv2i64")] + fn _svsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svsubhnt_s64(even, op1, op2) } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svsubhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svsubhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svsubhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svsubhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svsubhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svsubhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svsubhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv8i16")] + fn _svsublb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublb_s16(op1, op2) } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsublb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv4i32")] + fn _svsublb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublb_s32(op1, op2) } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsublb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv2i64")] + fn _svsublb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublb_s64(op1, op2) } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsublb_s64(op1, 
svdup_n_s32(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv8i16")] + fn _svsublb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svsublb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv4i32")] + fn _svsublb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svsublb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv2i64")] + fn _svsublb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svsublb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssublbt.nxv8i16" + )] + fn _svsublbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublbt_s16(op1, op2) } +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsublbt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssublbt.nxv4i32" + )] + fn _svsublbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublbt_s32(op1, op2) } +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsublbt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssublbt.nxv2i64" + )] + fn _svsublbt_s64(op1: svint32_t, op2: svint32_t) -> 
svint64_t; + } + unsafe { _svsublbt_s64(op1, op2) } +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsublbt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv8i16")] + fn _svsublt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublt_s16(op1, op2) } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsublt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv4i32")] + 
fn _svsublt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublt_s32(op1, op2) } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsublt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv2i64")] + fn _svsublt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublt_s64(op1, op2) } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsublt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", 
link_name = "llvm.aarch64.sve.usublt.nxv8i16")] + fn _svsublt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svsublt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv4i32")] + fn _svsublt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svsublt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn 
svsublt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv2i64")] + fn _svsublt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svsublt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssubltb.nxv8i16" + )] + fn _svsubltb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubltb_s16(op1, op2) } +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsubltb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s32])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssubltb.nxv4i32" + )] + fn _svsubltb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubltb_s32(op1, op2) } +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsubltb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssubltb.nxv2i64" + )] + fn _svsubltb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubltb_s64(op1, op2) } +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsubltb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv8i16")] + fn _svsubwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubwb_s16(op1, op2) } +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + svsubwb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv4i32")] + fn _svsubwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubwb_s32(op1, op2) } +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svsubwb_s32(op1, svdup_n_s16(op2)) +} 
+#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv2i64")] + fn _svsubwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubwb_s64(op1, op2) } +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_n_s64(op1: svint64_t, op2: i32) -> svint64_t { + svsubwb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv8i16")] + fn _svsubwb_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubwb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn 
svsubwb_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svsubwb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv4i32")] + fn _svsubwb_u32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubwb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { + svsubwb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv2i64")] + fn _svsubwb_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubwb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u64])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svsubwb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv8i16")] + fn _svsubwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubwt_s16(op1, op2) } +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + svsubwt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv4i32")] + fn _svsubwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubwt_s32(op1, op2) } +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svsubwt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv2i64")] + fn _svsubwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubwt_s64(op1, op2) } +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_n_s64(op1: svint64_t, op2: i32) -> svint64_t { + svsubwt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv8i16")] + fn _svsubwt_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubwt_u16(op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svsubwt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv4i32")] + fn _svsubwt_u32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubwt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { + svsubwt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.usubwt.nxv2i64")] + fn _svsubwt_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubwt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svsubwt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_f32(data: svfloat32x2_t, indices: svuint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv4f32")] + fn _svtbl2_f32(data0: svfloat32_t, data1: svfloat32_t, indices: svint32_t) -> svfloat32_t; + } + unsafe { + _svtbl2_f32( + svget2_f32::<0>(data), + svget2_f32::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_f64(data: svfloat64x2_t, indices: svuint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv2f64")] + fn _svtbl2_f64(data0: svfloat64_t, data1: svfloat64_t, indices: svint64_t) -> svfloat64_t; + } + unsafe { + _svtbl2_f64( + svget2_f64::<0>(data), + 
svget2_f64::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_s8(data: svint8x2_t, indices: svuint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv16i8")] + fn _svtbl2_s8(data0: svint8_t, data1: svint8_t, indices: svint8_t) -> svint8_t; + } + unsafe { + _svtbl2_s8( + svget2_s8::<0>(data), + svget2_s8::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_s16(data: svint16x2_t, indices: svuint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv8i16")] + fn _svtbl2_s16(data0: svint16_t, data1: svint16_t, indices: svint16_t) -> svint16_t; + } + unsafe { + _svtbl2_s16( + svget2_s16::<0>(data), + svget2_s16::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_s32(data: svint32x2_t, indices: svuint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv4i32")] + fn 
_svtbl2_s32(data0: svint32_t, data1: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { + _svtbl2_s32( + svget2_s32::<0>(data), + svget2_s32::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_s64(data: svint64x2_t, indices: svuint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv2i64")] + fn _svtbl2_s64(data0: svint64_t, data1: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { + _svtbl2_s64( + svget2_s64::<0>(data), + svget2_s64::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_u8(data: svuint8x2_t, indices: svuint8_t) -> svuint8_t { + unsafe { svtbl2_s8(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_u16(data: svuint16x2_t, indices: svuint16_t) -> svuint16_t { + unsafe { svtbl2_s16(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u32])"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_u32(data: svuint32x2_t, indices: svuint32_t) -> svuint32_t { + unsafe { svtbl2_s32(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_u64(data: svuint64x2_t, indices: svuint64_t) -> svuint64_t { + unsafe { svtbl2_s64(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_f32(fallback: svfloat32_t, data: svfloat32_t, indices: svuint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv4f32")] + fn _svtbx_f32(fallback: svfloat32_t, data: svfloat32_t, indices: svint32_t) -> svfloat32_t; + } + unsafe { _svtbx_f32(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_f64(fallback: svfloat64_t, data: svfloat64_t, indices: svuint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.tbx.nxv2f64")] + fn _svtbx_f64(fallback: svfloat64_t, data: svfloat64_t, indices: svint64_t) -> svfloat64_t; + } + unsafe { _svtbx_f64(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_s8(fallback: svint8_t, data: svint8_t, indices: svuint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv16i8")] + fn _svtbx_s8(fallback: svint8_t, data: svint8_t, indices: svint8_t) -> svint8_t; + } + unsafe { _svtbx_s8(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_s16(fallback: svint16_t, data: svint16_t, indices: svuint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv8i16")] + fn _svtbx_s16(fallback: svint16_t, data: svint16_t, indices: svint16_t) -> svint16_t; + } + unsafe { _svtbx_s16(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_s32(fallback: svint32_t, data: svint32_t, indices: svuint32_t) -> svint32_t 
{ + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv4i32")] + fn _svtbx_s32(fallback: svint32_t, data: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svtbx_s32(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_s64(fallback: svint64_t, data: svint64_t, indices: svuint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv2i64")] + fn _svtbx_s64(fallback: svint64_t, data: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svtbx_s64(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_u8(fallback: svuint8_t, data: svuint8_t, indices: svuint8_t) -> svuint8_t { + unsafe { svtbx_s8(fallback.as_signed(), data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_u16(fallback: svuint16_t, data: svuint16_t, indices: svuint16_t) -> svuint16_t { + unsafe { svtbx_s16(fallback.as_signed(), data.as_signed(), indices).as_unsigned() } +} 
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_u32(fallback: svuint32_t, data: svuint32_t, indices: svuint32_t) -> svuint32_t {
    // Unsigned form reuses the signed intrinsic; bit pattern is identical.
    unsafe { svtbx_s32(fallback.as_signed(), data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_u64(fallback: svuint64_t, data: svuint64_t, indices: svuint64_t) -> svuint64_t {
    unsafe { svtbx_s64(fallback.as_signed(), data.as_signed(), indices).as_unsigned() }
}
#[doc = "Unpack and extend high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_b])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(punpkhi))]
pub fn svunpkhi_b(op: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.punpkhi.nxv16i1")]
        fn _svunpkhi_b(op: svbool_t) -> svbool8_t;
    }
    unsafe { _svunpkhi_b(op).sve_into() }
}
#[doc = "Unpack and extend high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sunpkhi))]
pub fn svunpkhi_s16(op: svint8_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sunpkhi.nxv8i16")]
        fn _svunpkhi_s16(op: svint8_t) -> svint16_t;
    }
    unsafe { _svunpkhi_s16(op) }
}
#[doc = "Unpack and extend high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sunpkhi))]
pub fn svunpkhi_s32(op: svint16_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sunpkhi.nxv4i32")]
        fn _svunpkhi_s32(op: svint16_t) -> svint32_t;
    }
    unsafe { _svunpkhi_s32(op) }
}
#[doc = "Unpack and extend high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sunpkhi))]
pub fn svunpkhi_s64(op: svint32_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sunpkhi.nxv2i64")]
        fn _svunpkhi_s64(op: svint32_t) -> svint64_t;
    }
    unsafe { _svunpkhi_s64(op) }
}
#[doc = "Unpack and extend high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uunpkhi))]
pub fn svunpkhi_u16(op: svuint8_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uunpkhi.nxv8i16")]
        fn _svunpkhi_u16(op: svint8_t) -> svint16_t;
    }
    unsafe { _svunpkhi_u16(op.as_signed()).as_unsigned() }
}
#[doc = "Unpack and extend high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uunpkhi))]
pub fn svunpkhi_u32(op: svuint16_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uunpkhi.nxv4i32")]
        fn _svunpkhi_u32(op: svint16_t) -> svint32_t;
    }
    unsafe { _svunpkhi_u32(op.as_signed()).as_unsigned() }
}
#[doc = "Unpack and extend high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uunpkhi))]
pub fn svunpkhi_u64(op: svuint32_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uunpkhi.nxv2i64")]
        fn _svunpkhi_u64(op: svint32_t) -> svint64_t;
    }
    unsafe { _svunpkhi_u64(op.as_signed()).as_unsigned() }
}
#[doc = "Unpack and extend low half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_b])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(punpklo))]
pub fn svunpklo_b(op: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.punpklo.nxv16i1")]
        fn _svunpklo_b(op: svbool_t) -> svbool8_t;
    }
    unsafe { _svunpklo_b(op).sve_into() }
}
#[doc = "Unpack and extend low half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sunpklo))]
pub fn svunpklo_s16(op: svint8_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sunpklo.nxv8i16")]
        fn _svunpklo_s16(op: svint8_t) -> svint16_t;
    }
    unsafe { _svunpklo_s16(op) }
}
#[doc = "Unpack and extend low half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sunpklo))]
pub fn svunpklo_s32(op: svint16_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sunpklo.nxv4i32")]
        fn _svunpklo_s32(op: svint16_t) -> svint32_t;
    }
    unsafe { _svunpklo_s32(op) }
}
#[doc = "Unpack and extend low half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sunpklo))]
pub fn svunpklo_s64(op: svint32_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sunpklo.nxv2i64")]
        fn _svunpklo_s64(op: svint32_t) -> svint64_t;
    }
    unsafe { _svunpklo_s64(op) }
}
#[doc = "Unpack and extend low half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uunpklo))]
pub fn svunpklo_u16(op: svuint8_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uunpklo.nxv8i16")]
        fn _svunpklo_u16(op: svint8_t) -> svint16_t;
    }
    unsafe { _svunpklo_u16(op.as_signed()).as_unsigned() }
}
#[doc = "Unpack and extend low half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uunpklo))]
pub fn svunpklo_u32(op: svuint16_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uunpklo.nxv4i32")]
        fn _svunpklo_u32(op: svint16_t) -> svint32_t;
    }
    unsafe { _svunpklo_u32(op.as_signed()).as_unsigned() }
}
#[doc = "Unpack and extend low half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uunpklo))]
pub fn svunpklo_u64(op: svuint32_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uunpklo.nxv2i64")]
        fn _svunpklo_u64(op: svint32_t) -> svint64_t;
    }
    unsafe { _svunpklo_u64(op.as_signed()).as_unsigned() }
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv16i8")]
        fn _svuqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _svuqadd_s8_m(pg, op1, op2.as_signed()) }
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
    svuqadd_s8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
    svuqadd_s8_m(pg, op1, op2)
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
    svuqadd_s8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
    // Zeroing form: inactive lanes of op1 are zeroed before the merging op.
    svuqadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
    svuqadd_s8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv8i16")]
        fn _svuqadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svuqadd_s16_m(pg.sve_into(), op1, op2.as_signed()) }
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
    svuqadd_s16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
    svuqadd_s16_m(pg, op1, op2)
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
    svuqadd_s16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
    svuqadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
    svuqadd_s16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv4i32")]
        fn _svuqadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svuqadd_s32_m(pg.sve_into(), op1, op2.as_signed()) }
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
    svuqadd_s32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
    svuqadd_s32_m(pg, op1, op2)
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
    svuqadd_s32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
    svuqadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
    svuqadd_s32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv2i64")]
        fn _svuqadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svuqadd_s64_m(pg.sve_into(), op1, op2.as_signed()) }
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
    svuqadd_s64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
    svuqadd_s64_m(pg, op1, op2)
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
    svuqadd_s64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
    svuqadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
    svuqadd_s64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilege))]
pub fn svwhilege_b8_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilege.nxv16i1.i32")]
        fn _svwhilege_b8_s32(op1: i32, op2: i32) -> svbool_t;
    }
    unsafe { _svwhilege_b8_s32(op1, op2) }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilege))]
pub fn svwhilege_b16_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilege.nxv8i1.i32")]
        fn _svwhilege_b16_s32(op1: i32, op2: i32) -> svbool8_t;
    }
    unsafe { _svwhilege_b16_s32(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilege))]
pub fn svwhilege_b32_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilege.nxv4i1.i32")]
        fn _svwhilege_b32_s32(op1: i32, op2: i32) -> svbool4_t;
    }
    unsafe { _svwhilege_b32_s32(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilege))]
pub fn svwhilege_b64_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilege.nxv2i1.i32")]
        fn _svwhilege_b64_s32(op1: i32, op2: i32) -> svbool2_t;
    }
    unsafe { _svwhilege_b64_s32(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilege))]
pub fn svwhilege_b8_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilege.nxv16i1.i64")]
        fn _svwhilege_b8_s64(op1: i64, op2: i64) -> svbool_t;
    }
    unsafe { _svwhilege_b8_s64(op1, op2) }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilege))]
pub fn svwhilege_b16_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilege.nxv8i1.i64")]
        fn _svwhilege_b16_s64(op1: i64, op2: i64) -> svbool8_t;
    }
    unsafe { _svwhilege_b16_s64(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilege))]
pub fn svwhilege_b32_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilege.nxv4i1.i64")]
        fn _svwhilege_b32_s64(op1: i64, op2: i64) -> svbool4_t;
    }
    unsafe { _svwhilege_b32_s64(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilege))]
pub fn svwhilege_b64_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilege.nxv2i1.i64")]
        fn _svwhilege_b64_s64(op1: i64, op2: i64) -> svbool2_t;
    }
    unsafe { _svwhilege_b64_s64(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehs))]
pub fn svwhilege_b8_u32(op1: u32, op2: u32) -> svbool_t {
    // Unsigned comparison lowers to WHILEHS rather than WHILEGE.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilehs.nxv16i1.i32")]
        fn _svwhilege_b8_u32(op1: i32, op2: i32) -> svbool_t;
    }
    unsafe { _svwhilege_b8_u32(op1.as_signed(), op2.as_signed()) }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehs))]
pub fn svwhilege_b16_u32(op1: u32, op2: u32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilehs.nxv8i1.i32")]
        fn _svwhilege_b16_u32(op1: i32, op2: i32) -> svbool8_t;
    }
    unsafe { _svwhilege_b16_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehs))]
pub fn svwhilege_b32_u32(op1: u32, op2: u32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilehs.nxv4i1.i32")]
        fn _svwhilege_b32_u32(op1: i32, op2: i32) -> svbool4_t;
    }
    unsafe { _svwhilege_b32_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehs))]
pub fn svwhilege_b64_u32(op1: u32, op2: u32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilehs.nxv2i1.i32")]
        fn _svwhilege_b64_u32(op1: i32, op2: i32) -> svbool2_t;
    }
    unsafe { _svwhilege_b64_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehs))]
pub fn svwhilege_b8_u64(op1: u64, op2: u64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilehs.nxv16i1.i64")]
        fn _svwhilege_b8_u64(op1: i64, op2: i64) -> svbool_t;
    }
    unsafe { _svwhilege_b8_u64(op1.as_signed(), op2.as_signed()) }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehs))]
pub fn svwhilege_b16_u64(op1: u64, op2: u64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilehs.nxv8i1.i64")]
        fn _svwhilege_b16_u64(op1: i64, op2: i64) -> svbool8_t;
    }
    unsafe { _svwhilege_b16_u64(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehs))]
pub fn svwhilege_b32_u64(op1: u64, op2: u64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilehs.nxv4i1.i64")]
        fn _svwhilege_b32_u64(op1: i64, op2: i64) -> svbool4_t;
    }
    unsafe { _svwhilege_b32_u64(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehs))]
pub fn svwhilege_b64_u64(op1: u64, op2: u64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilehs.nxv2i1.i64")]
        fn _svwhilege_b64_u64(op1: i64, op2: i64) -> svbool2_t;
    }
    unsafe { _svwhilege_b64_u64(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilegt))]
pub fn svwhilegt_b8_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilegt.nxv16i1.i32")]
        fn _svwhilegt_b8_s32(op1: i32, op2: i32) -> svbool_t;
    }
    unsafe { _svwhilegt_b8_s32(op1, op2) }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilegt))]
pub fn svwhilegt_b16_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilegt.nxv8i1.i32")]
        fn _svwhilegt_b16_s32(op1: i32, op2: i32) -> svbool8_t;
    }
    unsafe { _svwhilegt_b16_s32(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilegt))]
pub fn svwhilegt_b32_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilegt.nxv4i1.i32")]
        fn _svwhilegt_b32_s32(op1: i32, op2: i32) -> svbool4_t;
    }
    unsafe { _svwhilegt_b32_s32(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilegt))]
pub fn svwhilegt_b64_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilegt.nxv2i1.i32")]
        fn _svwhilegt_b64_s32(op1: i32, op2: i32) -> svbool2_t;
    }
    unsafe { _svwhilegt_b64_s32(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilegt))]
pub fn svwhilegt_b8_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilegt.nxv16i1.i64")]
        fn _svwhilegt_b8_s64(op1: i64, op2: i64) -> svbool_t;
    }
    unsafe { _svwhilegt_b8_s64(op1, op2) }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilegt))]
pub fn svwhilegt_b16_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilegt.nxv8i1.i64")]
        fn _svwhilegt_b16_s64(op1: i64, op2: i64) -> svbool8_t;
    }
    unsafe { _svwhilegt_b16_s64(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilegt))]
pub fn svwhilegt_b32_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilegt.nxv4i1.i64")]
        fn _svwhilegt_b32_s64(op1: i64, op2: i64) -> svbool4_t;
    }
    unsafe { _svwhilegt_b32_s64(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilegt))]
pub fn svwhilegt_b64_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilegt.nxv2i1.i64")]
        fn _svwhilegt_b64_s64(op1: i64, op2: i64) -> svbool2_t;
    }
    unsafe { _svwhilegt_b64_s64(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehi))]
pub fn svwhilegt_b8_u32(op1: u32, op2: u32) -> svbool_t {
    // Unsigned comparison lowers to WHILEHI rather than WHILEGT.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilehi.nxv16i1.i32")]
        fn _svwhilegt_b8_u32(op1: i32, op2: i32) -> svbool_t;
    }
    unsafe { _svwhilegt_b8_u32(op1.as_signed(), op2.as_signed()) }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehi))]
pub fn svwhilegt_b16_u32(op1: u32, op2: u32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilehi.nxv8i1.i32")]
        fn _svwhilegt_b16_u32(op1: i32, op2: i32) -> svbool8_t;
    }
    unsafe { _svwhilegt_b16_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehi))]
pub fn svwhilegt_b32_u32(op1: u32, op2: u32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilehi.nxv4i1.i32")]
        fn _svwhilegt_b32_u32(op1: i32, op2: i32) -> svbool4_t;
    }
    unsafe { _svwhilegt_b32_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehi))]
pub fn svwhilegt_b64_u32(op1: u32, op2: u32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.whilehi.nxv2i1.i32")]
        fn _svwhilegt_b64_u32(op1: i32, op2: i32) -> svbool2_t;
    }
    unsafe { _svwhilegt_b64_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While
decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv16i1.i64" + )] + fn _svwhilegt_b8_u64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilegt_b8_u64(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv8i1.i64" + )] + fn _svwhilegt_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilegt_b16_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv4i1.i64" + )] + fn _svwhilegt_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilegt_b32_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = 
"While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv2i1.i64" + )] + fn _svwhilegt_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilegt_b64_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilerw_8ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.b.nxv16i1.p0" + )] + fn _svwhilerw_8ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool_t; + } + _svwhilerw_8ptr(op1, op2) +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilerw_16ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.h.nxv8i1.p0" + )] + fn _svwhilerw_16ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool8_t; + } + _svwhilerw_16ptr(op1, op2).sve_into() +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilerw_32ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = 
op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.s.nxv4i1.p0" + )] + fn _svwhilerw_32ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool4_t; + } + _svwhilerw_32ptr(op1, op2).sve_into() +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilerw_64ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.d.nxv2i1.p0" + )] + fn _svwhilerw_64ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool2_t; + } + _svwhilerw_64ptr(op1, op2).sve_into() +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_f32(op1: *const f32, op2: *const f32) -> svbool_t { + svwhilerw_32ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_f64(op1: *const f64, op2: *const f64) -> svbool_t { + svwhilerw_64ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s8(op1: *const i8, op2: *const i8) -> svbool_t { + svwhilerw_8ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s16(op1: *const i16, op2: *const i16) -> svbool_t { + svwhilerw_16ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s32(op1: 
*const i32, op2: *const i32) -> svbool_t { + svwhilerw_32ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s64(op1: *const i64, op2: *const i64) -> svbool_t { + svwhilerw_64ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u8(op1: *const u8, op2: *const u8) -> svbool_t { + svwhilerw_8ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u16(op1: *const u16, op2: *const u16) -> svbool_t { + svwhilerw_16ptr::(op1, op2) +} +#[doc = "While free of read-after-write 
conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u32(op1: *const u32, op2: *const u32) -> svbool_t { + svwhilerw_32ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u64(op1: *const u64, op2: *const u64) -> svbool_t { + svwhilerw_64ptr::(op1, op2) +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilewr_8ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.b.nxv16i1.p0" + )] + fn _svwhilewr_8ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool_t; + } + _svwhilewr_8ptr(op1, op2) +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilewr_16ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const 
crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.h.nxv8i1.p0" + )] + fn _svwhilewr_16ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool8_t; + } + _svwhilewr_16ptr(op1, op2).sve_into() +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilewr_32ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.s.nxv4i1.p0" + )] + fn _svwhilewr_32ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool4_t; + } + _svwhilewr_32ptr(op1, op2).sve_into() +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilewr_64ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.d.nxv2i1.p0" + )] + fn _svwhilewr_64ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool2_t; + } + _svwhilewr_64ptr(op1, op2).sve_into() +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(whilewr))] +pub unsafe fn svwhilewr_f32(op1: *const f32, op2: *const f32) -> svbool_t { + svwhilewr_32ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_f64(op1: *const f64, op2: *const f64) -> svbool_t { + svwhilewr_64ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s8(op1: *const i8, op2: *const i8) -> svbool_t { + svwhilewr_8ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s16(op1: *const i16, op2: *const i16) -> svbool_t { + 
svwhilewr_16ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s32(op1: *const i32, op2: *const i32) -> svbool_t { + svwhilewr_32ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s64(op1: *const i64, op2: *const i64) -> svbool_t { + svwhilewr_64ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u8(op1: *const u8, op2: *const u8) -> svbool_t { + svwhilewr_8ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u16(op1: *const u16, op2: *const u16) -> svbool_t { + svwhilewr_16ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u32(op1: *const u32, op2: *const u32) -> svbool_t { + svwhilewr_32ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u64(op1: *const u64, op2: *const u64) -> svbool_t { + svwhilewr_64ptr::(op1, op2) +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s8])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv16i8")] + fn _svxar_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svxar_n_s8(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv8i16")] + fn _svxar_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svxar_n_s16(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv4i32")] + fn _svxar_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svxar_n_s32(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv2i64")] + fn _svxar_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svxar_n_s64(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe { svxar_n_s8::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe { svxar_n_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub 
fn svxar_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe { svxar_n_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe { svxar_n_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve2/ld_st_tests_aarch64.rs b/library/stdarch/crates/core_arch/src/aarch64/sve2/ld_st_tests_aarch64.rs new file mode 100644 index 0000000000000..2ec3ad6a5d04c --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve2/ld_st_tests_aarch64.rs @@ -0,0 +1,2482 @@ +// This code is automatically generated. DO NOT MODIFY. 
+// +// Instead, modify `crates/stdarch-gen-arm/spec/sve` and run the following command to re-generate +// this file: +// +// ``` +// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec +// ``` +#![allow(unused)] +use super::*; +use std::boxed::Box; +use std::convert::{TryFrom, TryInto}; +use std::sync::LazyLock; +use std::vec::Vec; +use stdarch_test::simd_test; +static F32_DATA: LazyLock<[f32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as f32) + .collect::>() + .try_into() + .expect("f32 data incorrectly initialised") +}); +static F64_DATA: LazyLock<[f64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as f64) + .collect::>() + .try_into() + .expect("f64 data incorrectly initialised") +}); +static I8_DATA: LazyLock<[i8; 256 * 5]> = LazyLock::new(|| { + (0..256 * 5) + .map(|i| ((i + 128) % 256 - 128) as i8) + .collect::>() + .try_into() + .expect("i8 data incorrectly initialised") +}); +static I16_DATA: LazyLock<[i16; 128 * 5]> = LazyLock::new(|| { + (0..128 * 5) + .map(|i| i as i16) + .collect::>() + .try_into() + .expect("i16 data incorrectly initialised") +}); +static I32_DATA: LazyLock<[i32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as i32) + .collect::>() + .try_into() + .expect("i32 data incorrectly initialised") +}); +static I64_DATA: LazyLock<[i64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as i64) + .collect::>() + .try_into() + .expect("i64 data incorrectly initialised") +}); +static U8_DATA: LazyLock<[u8; 256 * 5]> = LazyLock::new(|| { + (0..256 * 5) + .map(|i| i as u8) + .collect::>() + .try_into() + .expect("u8 data incorrectly initialised") +}); +static U16_DATA: LazyLock<[u16; 128 * 5]> = LazyLock::new(|| { + (0..128 * 5) + .map(|i| i as u16) + .collect::>() + .try_into() + .expect("u16 data incorrectly initialised") +}); +static U32_DATA: LazyLock<[u32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as u32) + .collect::>() + .try_into() + .expect("u32 data incorrectly 
initialised") +}); +static U64_DATA: LazyLock<[u64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as u64) + .collect::>() + .try_into() + .expect("u64 data incorrectly initialised") +}); +#[target_feature(enable = "sve")] +fn assert_vector_matches_f32(vector: svfloat32_t, expected: svfloat32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_f32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_f64(vector: svfloat64_t, expected: svfloat64_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_f64(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i8(vector: svint8_t, expected: svint8_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b8(), defined)); + let cmp = svcmpne_s8(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i16(vector: svint16_t, expected: svint16_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b16(), defined)); + let cmp = svcmpne_s16(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i32(vector: svint32_t, expected: svint32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_s32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i64(vector: svint64_t, expected: svint64_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_s64(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u8(vector: svuint8_t, expected: svuint8_t) { + let defined = svrdffr(); + 
assert!(svptest_first(svptrue_b8(), defined)); + let cmp = svcmpne_u8(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u16(vector: svuint16_t, expected: svuint16_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b16(), defined)); + let cmp = svcmpne_u16(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u32(vector: svuint32_t, expected: svuint32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_u32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u64(vector: svuint64_t, expected: svuint64_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_u64(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64index_f64_with_svstnt1_scatter_s64index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_s64(0, 1); + svstnt1_scatter_s64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, indices); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64index_s64_with_svstnt1_scatter_s64index_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + 
let indices = svindex_s64(0, 1); + svstnt1_scatter_s64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64index_u64_with_svstnt1_scatter_s64index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1_scatter_s64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64index_f64_with_svstnt1_scatter_u64index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_u64(0, 1); + svstnt1_scatter_u64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, indices); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn 
test_svldnt1_gather_u64index_s64_with_svstnt1_scatter_u64index_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1_scatter_u64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64index_u64_with_svstnt1_scatter_u64index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1_scatter_u64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64offset_f64_with_svstnt1_scatter_s64offset_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_s64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets); + 
assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64offset_s64_with_svstnt1_scatter_s64offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_s64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64offset_u64_with_svstnt1_scatter_s64offset_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_s64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32offset_f32_with_svstnt1_scatter_u32offset_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32offset_f32(svptrue_b32(), 
storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = + svldnt1_gather_u32offset_f32(svptrue_b32(), storage.as_ptr() as *const f32, offsets); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32offset_s32_with_svstnt1_scatter_u32offset_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32offset_s32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1_gather_u32offset_s32(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32offset_u32_with_svstnt1_scatter_u32offset_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32offset_u32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1_gather_u32offset_u32(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64offset_f64_with_svstnt1_scatter_u64offset_f64() { + let mut storage 
= [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_u64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64offset_s64_with_svstnt1_scatter_u64offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_u64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64offset_u64_with_svstnt1_scatter_u64offset_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_u64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets); + 
assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_f64_with_svstnt1_scatter_u64base_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_f64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_f64(svptrue_b64(), bases); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_s64_with_svstnt1_scatter_u64base_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_s64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_s64(svptrue_b64(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_u64_with_svstnt1_scatter_u64base_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = 
svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_u64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_u64(svptrue_b64(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_index_f32_with_svstnt1_scatter_u32base_index_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32base_index_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_index_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_index_s32_with_svstnt1_scatter_u32base_index_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32base_index_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_index_s32( + svptrue_b32(), + 
bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_index_u32_with_svstnt1_scatter_u32base_index_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32base_index_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_index_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_index_f64_with_svstnt1_scatter_u64base_index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn 
test_svldnt1_gather_u64base_index_s64_with_svstnt1_scatter_u64base_index_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_index_u64_with_svstnt1_scatter_u64base_index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_offset_f32_with_svstnt1_scatter_u32base_offset_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + 
svstnt1_scatter_u32base_offset_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_offset_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_offset_s32_with_svstnt1_scatter_u32base_offset_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32base_offset_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_offset_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_offset_u32_with_svstnt1_scatter_u32base_offset_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32base_offset_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_offset_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_u32( + loaded, 
+ svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_offset_f64_with_svstnt1_scatter_u64base_offset_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_offset_s64_with_svstnt1_scatter_u64base_offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_offset_u64_with_svstnt1_scatter_u64base_offset_u64() { + let mut 
storage = [0 as u64; 160usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_s64offset_s64_with_svstnt1b_scatter_s64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = + svldnt1sb_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_s64offset_s64_with_svstnt1h_scatter_s64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_s64offset_s64(svptrue_b16(), 
storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_s64offset_s64_with_svstnt1w_scatter_s64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svstnt1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1sw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_s64offset_u64_with_svstnt1b_scatter_s64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = + svldnt1sb_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_s64offset_u64_with_svstnt1h_scatter_s64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), 
offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_s64offset_u64_with_svstnt1w_scatter_s64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svstnt1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1sw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_u32offset_s32_with_svstnt1b_scatter_u32offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = + svldnt1sb_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u32offset_s32_with_svstnt1h_scatter_u32offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = 
svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_u32offset_u32_with_svstnt1b_scatter_u32offset_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = + svldnt1sb_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u32offset_u32_with_svstnt1h_scatter_u32offset_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), 
);
}
// --- SVE2 non-temporal gather/scatter round-trip tests (sign-extending loads) ---
// Shared pattern for every test in this group:
//   1. build an identity vector (`svindex_*(start, step)`),
//   2. scatter it into `storage` with a non-temporal scatter (`svstnt1*_scatter_*`),
//   3. sanity-check storage: each element is either still 0 or its own index
//      (the scatter only covers a vector-length-dependent prefix of the array),
//   4. reset the first-fault register with `svsetffr()` before loading,
//   5. gather the values back with the matching `svldnt1s*_gather_*` intrinsic
//      (the `s` infix denotes a sign-extending narrow load, per ACLE naming)
//      and assert the result equals the original identity vector.
// Offsets are in bytes, hence steps of 1/2/4 for 8/16/32-bit elements.

// i8 storage, u64 byte offsets: stnt1b scatter paired with ldnt1sb gather (s64 lanes).
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sb_gather_u64offset_s64_with_svstnt1b_scatter_u64offset_s64() {
    let mut storage = [0 as i8; 1280usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
    svstnt1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        // Elements past the active vector length stay 0; written ones equal their index.
        assert!(val == 0 as i8 || val == i as i8);
    }
    svsetffr();
    let loaded =
        svldnt1sb_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// i16 storage, u64 byte offsets (step 2): stnt1h / ldnt1sh (s64 lanes).
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sh_gather_u64offset_s64_with_svstnt1h_scatter_u64offset_s64() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
    svstnt1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded =
        svldnt1sh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// i32 storage, u64 byte offsets (step 4): stnt1w / ldnt1sw (s64 lanes).
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sw_gather_u64offset_s64_with_svstnt1w_scatter_u64offset_s64() {
    let mut storage = [0 as i32; 320usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
    svstnt1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i32 || val == i as i32);
    }
    svsetffr();
    let loaded =
        svldnt1sw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// Same as above but with u64 (unsigned) lane data; the gather still sign-extends bytes.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sb_gather_u64offset_u64_with_svstnt1b_scatter_u64offset_u64() {
    let mut storage = [0 as u8; 1280usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
    svstnt1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u8 || val == i as u8);
    }
    svsetffr();
    let loaded =
        svldnt1sb_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u16 storage, u64 byte offsets (step 2), u64 lanes.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sh_gather_u64offset_u64_with_svstnt1h_scatter_u64offset_u64() {
    let mut storage = [0 as u16; 640usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
    svstnt1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u16 || val == i as u16);
    }
    svsetffr();
    let loaded =
        svldnt1sh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u32 storage, u64 byte offsets (step 4), u64 lanes.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sw_gather_u64offset_u64_with_svstnt1w_scatter_u64offset_u64() {
    let mut storage = [0 as u32; 320usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
    svstnt1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u32 || val == i as u32);
    }
    svsetffr();
    let loaded =
        svldnt1sw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u32base_offset form: `bases` holds 32-bit per-lane addresses (here lane indices)
// and the scalar argument carries the storage base address plus an offset of one
// element, so the expected identity sequence starts at 1.
// NOTE(review): this assumes the 32-bit bases plus 64-bit scalar offset address
// the array as generated — TODO confirm against the scatter/gather addressing mode.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sb_gather_u32base_offset_s32_with_svstnt1b_scatter_u32base_offset_s32() {
    let mut storage = [0 as i8; 1280usize];
    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svindex_u32(0, 1u32.try_into().unwrap());
    svstnt1b_scatter_u32base_offset_s32(
        svptrue_b8(),
        bases,
        storage.as_ptr() as i64 + 1u32 as i64,
        data,
    );
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i8 || val == i as i8);
    }
    svsetffr();
    let loaded = svldnt1sb_gather_u32base_offset_s32(
        svptrue_b8(),
        bases,
        storage.as_ptr() as i64 + 1u32 as i64,
    );
    assert_vector_matches_i32(
        loaded,
        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// Same u32base_offset pattern for 16-bit elements (offset/step of 2 bytes).
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sh_gather_u32base_offset_s32_with_svstnt1h_scatter_u32base_offset_s32() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svindex_u32(0, 2u32.try_into().unwrap());
    svstnt1h_scatter_u32base_offset_s32(
        svptrue_b16(),
        bases,
        storage.as_ptr() as i64 + 2u32 as i64,
        data,
    );
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded = svldnt1sh_gather_u32base_offset_s32(
        svptrue_b16(),
        bases,
        storage.as_ptr() as i64 + 2u32 as i64,
    );
    assert_vector_matches_i32(
        loaded,
        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// --- SVE2 non-temporal gather/scatter round-trips: base-vector and index forms ---
// Each test scatters an identity vector into `storage`, checks storage holds only
// zeros or identity values (the scatter covers a vector-length-dependent prefix),
// resets the first-fault register with `svsetffr()`, gathers the data back, and
// asserts it matches. Variants covered here:
//   * u32base_offset / u32base_index: per-lane 32-bit bases + scalar offset/index;
//   * u64base[_offset|_index]: per-lane 64-bit absolute addresses built with
//     `svdup_n_u64(ptr) + svindex_u64(...)`, optionally plus a scalar offset/index;
//   * s64index / u64index: element (not byte) indices, so the index step is 1.
// The `1s{b,h,w}` loads sign-extend narrow elements per ACLE naming.

// u32base_offset, u32 lanes, i8 storage, offset of 1 byte (expected values start at 1).
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sb_gather_u32base_offset_u32_with_svstnt1b_scatter_u32base_offset_u32() {
    let mut storage = [0 as i8; 1280usize];
    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svindex_u32(0, 1u32.try_into().unwrap());
    svstnt1b_scatter_u32base_offset_u32(
        svptrue_b8(),
        bases,
        storage.as_ptr() as i64 + 1u32 as i64,
        data,
    );
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i8 || val == i as i8);
    }
    svsetffr();
    let loaded = svldnt1sb_gather_u32base_offset_u32(
        svptrue_b8(),
        bases,
        storage.as_ptr() as i64 + 1u32 as i64,
    );
    assert_vector_matches_u32(
        loaded,
        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u32base_offset, u32 lanes, i16 storage, 2-byte step/offset.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sh_gather_u32base_offset_u32_with_svstnt1h_scatter_u32base_offset_u32() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svindex_u32(0, 2u32.try_into().unwrap());
    svstnt1h_scatter_u32base_offset_u32(
        svptrue_b16(),
        bases,
        storage.as_ptr() as i64 + 2u32 as i64,
        data,
    );
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded = svldnt1sh_gather_u32base_offset_u32(
        svptrue_b16(),
        bases,
        storage.as_ptr() as i64 + 2u32 as i64,
    );
    assert_vector_matches_u32(
        loaded,
        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64base_offset, s64 lanes, i8 storage: absolute per-lane addresses + 1-byte offset.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sb_gather_u64base_offset_s64_with_svstnt1b_scatter_u64base_offset_s64() {
    let mut storage = [0 as i8; 1280usize];
    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
    svstnt1b_scatter_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i8 || val == i as i8);
    }
    svsetffr();
    let loaded = svldnt1sb_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap());
    assert_vector_matches_i64(
        loaded,
        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64base_offset, s64 lanes, i16 storage, 2-byte step/offset.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sh_gather_u64base_offset_s64_with_svstnt1h_scatter_u64base_offset_s64() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
    svstnt1h_scatter_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded =
        svldnt1sh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap());
    assert_vector_matches_i64(
        loaded,
        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64base_offset, s64 lanes, i32 storage, 4-byte step/offset.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sw_gather_u64base_offset_s64_with_svstnt1w_scatter_u64base_offset_s64() {
    let mut storage = [0 as i32; 320usize];
    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
    svstnt1w_scatter_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i32 || val == i as i32);
    }
    svsetffr();
    let loaded =
        svldnt1sw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap());
    assert_vector_matches_i64(
        loaded,
        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64base_offset, u64 lanes, i8 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sb_gather_u64base_offset_u64_with_svstnt1b_scatter_u64base_offset_u64() {
    let mut storage = [0 as i8; 1280usize];
    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
    svstnt1b_scatter_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i8 || val == i as i8);
    }
    svsetffr();
    let loaded = svldnt1sb_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap());
    assert_vector_matches_u64(
        loaded,
        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64base_offset, u64 lanes, i16 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sh_gather_u64base_offset_u64_with_svstnt1h_scatter_u64base_offset_u64() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
    svstnt1h_scatter_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded =
        svldnt1sh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap());
    assert_vector_matches_u64(
        loaded,
        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64base_offset, u64 lanes, i32 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sw_gather_u64base_offset_u64_with_svstnt1w_scatter_u64base_offset_u64() {
    let mut storage = [0 as i32; 320usize];
    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
    svstnt1w_scatter_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i32 || val == i as i32);
    }
    svsetffr();
    let loaded =
        svldnt1sw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap());
    assert_vector_matches_u64(
        loaded,
        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64base (no extra offset), s64 lanes, i8 storage: addresses fully encoded in `bases`.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sb_gather_u64base_s64_with_svstnt1b_scatter_u64base_s64() {
    let mut storage = [0 as i8; 1280usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
    svstnt1b_scatter_u64base_s64(svptrue_b8(), bases, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i8 || val == i as i8);
    }
    svsetffr();
    let loaded = svldnt1sb_gather_u64base_s64(svptrue_b8(), bases);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64base, s64 lanes, i16 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sh_gather_u64base_s64_with_svstnt1h_scatter_u64base_s64() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
    svstnt1h_scatter_u64base_s64(svptrue_b16(), bases, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded = svldnt1sh_gather_u64base_s64(svptrue_b16(), bases);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64base, s64 lanes, i32 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sw_gather_u64base_s64_with_svstnt1w_scatter_u64base_s64() {
    let mut storage = [0 as i32; 320usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
    svstnt1w_scatter_u64base_s64(svptrue_b32(), bases, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i32 || val == i as i32);
    }
    svsetffr();
    let loaded = svldnt1sw_gather_u64base_s64(svptrue_b32(), bases);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64base, u64 lanes, i8 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sb_gather_u64base_u64_with_svstnt1b_scatter_u64base_u64() {
    let mut storage = [0 as i8; 1280usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
    svstnt1b_scatter_u64base_u64(svptrue_b8(), bases, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i8 || val == i as i8);
    }
    svsetffr();
    let loaded = svldnt1sb_gather_u64base_u64(svptrue_b8(), bases);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64base, u64 lanes, i16 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sh_gather_u64base_u64_with_svstnt1h_scatter_u64base_u64() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
    svstnt1h_scatter_u64base_u64(svptrue_b16(), bases, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded = svldnt1sh_gather_u64base_u64(svptrue_b16(), bases);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64base, u64 lanes, i32 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sw_gather_u64base_u64_with_svstnt1w_scatter_u64base_u64() {
    let mut storage = [0 as i32; 320usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
    svstnt1w_scatter_u64base_u64(svptrue_b32(), bases, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i32 || val == i as i32);
    }
    svsetffr();
    let loaded = svldnt1sw_gather_u64base_u64(svptrue_b32(), bases);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// s64index form: element indices (scaled by element size in hardware), step 1.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sh_gather_s64index_s64_with_svstnt1h_scatter_s64index_s64() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let indices = svindex_s64(0, 1);
    svstnt1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded =
        svldnt1sh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// s64index, s64 lanes, i32 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sw_gather_s64index_s64_with_svstnt1w_scatter_s64index_s64() {
    let mut storage = [0 as i32; 320usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let indices = svindex_s64(0, 1);
    svstnt1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i32 || val == i as i32);
    }
    svsetffr();
    let loaded =
        svldnt1sw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// s64index, u64 lanes, u16 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sh_gather_s64index_u64_with_svstnt1h_scatter_s64index_u64() {
    let mut storage = [0 as u16; 640usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let indices = svindex_s64(0, 1);
    svstnt1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u16 || val == i as u16);
    }
    svsetffr();
    let loaded =
        svldnt1sh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// s64index, u64 lanes, u32 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sw_gather_s64index_u64_with_svstnt1w_scatter_s64index_u64() {
    let mut storage = [0 as u32; 320usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let indices = svindex_s64(0, 1);
    svstnt1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u32 || val == i as u32);
    }
    svsetffr();
    let loaded =
        svldnt1sw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64index form, s64 lanes, i16 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sh_gather_u64index_s64_with_svstnt1h_scatter_u64index_s64() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let indices = svindex_u64(0, 1);
    svstnt1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded =
        svldnt1sh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64index, s64 lanes, i32 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sw_gather_u64index_s64_with_svstnt1w_scatter_u64index_s64() {
    let mut storage = [0 as i32; 320usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let indices = svindex_u64(0, 1);
    svstnt1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i32 || val == i as i32);
    }
    svsetffr();
    let loaded =
        svldnt1sw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64index, u64 lanes, u16 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sh_gather_u64index_u64_with_svstnt1h_scatter_u64index_u64() {
    let mut storage = [0 as u16; 640usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let indices = svindex_u64(0, 1);
    svstnt1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u16 || val == i as u16);
    }
    svsetffr();
    let loaded =
        svldnt1sh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64index, u64 lanes, u32 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sw_gather_u64index_u64_with_svstnt1w_scatter_u64index_u64() {
    let mut storage = [0 as u32; 320usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let indices = svindex_u64(0, 1);
    svstnt1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u32 || val == i as u32);
    }
    svsetffr();
    let loaded =
        svldnt1sw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u32base_index form: scalar index is the element-scaled base (ptr / elem_size) + 1.
// NOTE(review): dividing the pointer by the element size assumes the allocation is
// suitably aligned so the division is exact — TODO confirm.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sh_gather_u32base_index_s32_with_svstnt1h_scatter_u32base_index_s32() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svindex_u32(0, 2u32.try_into().unwrap());
    svstnt1h_scatter_u32base_index_s32(
        svptrue_b16(),
        bases,
        storage.as_ptr() as i64 / (2u32 as i64) + 1,
        data,
    );
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded = svldnt1sh_gather_u32base_index_s32(
        svptrue_b16(),
        bases,
        storage.as_ptr() as i64 / (2u32 as i64) + 1,
    );
    assert_vector_matches_i32(
        loaded,
        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u32base_index, u32 lanes, i16 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sh_gather_u32base_index_u32_with_svstnt1h_scatter_u32base_index_u32() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svindex_u32(0, 2u32.try_into().unwrap());
    svstnt1h_scatter_u32base_index_u32(
        svptrue_b16(),
        bases,
        storage.as_ptr() as i64 / (2u32 as i64) + 1,
        data,
    );
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded = svldnt1sh_gather_u32base_index_u32(
        svptrue_b16(),
        bases,
        storage.as_ptr() as i64 / (2u32 as i64) + 1,
    );
    assert_vector_matches_u32(
        loaded,
        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64base_index: per-lane absolute addresses plus a scalar element index of 1.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sh_gather_u64base_index_s64_with_svstnt1h_scatter_u64base_index_s64() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
    svstnt1h_scatter_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded = svldnt1sh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap());
    assert_vector_matches_i64(
        loaded,
        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64base_index, s64 lanes, i32 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sw_gather_u64base_index_s64_with_svstnt1w_scatter_u64base_index_s64() {
    let mut storage = [0 as i32; 320usize];
    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
    svstnt1w_scatter_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i32 || val == i as i32);
    }
    svsetffr();
    let loaded = svldnt1sw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap());
    assert_vector_matches_i64(
        loaded,
        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64base_index, u64 lanes, i16 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sh_gather_u64base_index_u64_with_svstnt1h_scatter_u64base_index_u64() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
    svstnt1h_scatter_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded = svldnt1sh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap());
    assert_vector_matches_u64(
        loaded,
        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64base_index, u64 lanes, i32 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1sw_gather_u64base_index_u64_with_svstnt1w_scatter_u64base_index_u64() {
    let mut storage = [0 as i32; 320usize];
    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svdup_n_u64(storage.as_ptr() as u64);
    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
    svstnt1w_scatter_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap(), data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i32 || val == i as i32);
    }
    svsetffr();
    let loaded = svldnt1sw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap());
    assert_vector_matches_u64(
        loaded,
        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// --- SVE2 non-temporal gather/scatter round-trips: zero-extending loads ---
// Same scatter → check → `svsetffr()` → gather → compare pattern as the
// sign-extending group, but using `svldnt1u{b,h,w}` gathers (zero-extending
// narrow loads per ACLE naming, taking `*const u8/u16/u32` sources).
// Byte offsets step by the element size (1/2/4).

// i8 storage, s64 byte offsets: stnt1b scatter / ldnt1ub gather into s64 lanes.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1ub_gather_s64offset_s64_with_svstnt1b_scatter_s64offset_s64() {
    let mut storage = [0 as i8; 1280usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_s64(0, 1u32.try_into().unwrap());
    svstnt1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        // Elements past the active vector length stay 0; written ones equal their index.
        assert!(val == 0 as i8 || val == i as i8);
    }
    svsetffr();
    let loaded =
        svldnt1ub_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// i16 storage, s64 byte offsets (step 2).
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1uh_gather_s64offset_s64_with_svstnt1h_scatter_s64offset_s64() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_s64(0, 2u32.try_into().unwrap());
    svstnt1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded =
        svldnt1uh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// i32 storage, s64 byte offsets (step 4).
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1uw_gather_s64offset_s64_with_svstnt1w_scatter_s64offset_s64() {
    let mut storage = [0 as i32; 320usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_s64(0, 4u32.try_into().unwrap());
    svstnt1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i32 || val == i as i32);
    }
    svsetffr();
    let loaded =
        svldnt1uw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u8 storage, s64 byte offsets, u64 lanes.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1ub_gather_s64offset_u64_with_svstnt1b_scatter_s64offset_u64() {
    let mut storage = [0 as u8; 1280usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_s64(0, 1u32.try_into().unwrap());
    svstnt1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u8 || val == i as u8);
    }
    svsetffr();
    let loaded =
        svldnt1ub_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u16 storage, s64 byte offsets, u64 lanes.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1uh_gather_s64offset_u64_with_svstnt1h_scatter_s64offset_u64() {
    let mut storage = [0 as u16; 640usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_s64(0, 2u32.try_into().unwrap());
    svstnt1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u16 || val == i as u16);
    }
    svsetffr();
    let loaded =
        svldnt1uh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u32 storage, s64 byte offsets, u64 lanes.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1uw_gather_s64offset_u64_with_svstnt1w_scatter_s64offset_u64() {
    let mut storage = [0 as u32; 320usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_s64(0, 4u32.try_into().unwrap());
    svstnt1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u32 || val == i as u32);
    }
    svsetffr();
    let loaded =
        svldnt1uw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u32offset form: 32-bit unsigned byte offsets, s32 lanes, i8 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1ub_gather_u32offset_s32_with_svstnt1b_scatter_u32offset_s32() {
    let mut storage = [0 as i8; 1280usize];
    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u32(0, 1u32.try_into().unwrap());
    svstnt1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i8 || val == i as i8);
    }
    svsetffr();
    let loaded =
        svldnt1ub_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
    assert_vector_matches_i32(
        loaded,
        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u32offset, s32 lanes, i16 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1uh_gather_u32offset_s32_with_svstnt1h_scatter_u32offset_s32() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u32(0, 2u32.try_into().unwrap());
    svstnt1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded =
        svldnt1uh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
    assert_vector_matches_i32(
        loaded,
        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u32offset, u32 lanes, u8 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1ub_gather_u32offset_u32_with_svstnt1b_scatter_u32offset_u32() {
    let mut storage = [0 as u8; 1280usize];
    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u32(0, 1u32.try_into().unwrap());
    svstnt1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u8 || val == i as u8);
    }
    svsetffr();
    let loaded =
        svldnt1ub_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
    assert_vector_matches_u32(
        loaded,
        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u32offset, u32 lanes, u16 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1uh_gather_u32offset_u32_with_svstnt1h_scatter_u32offset_u32() {
    let mut storage = [0 as u16; 640usize];
    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u32(0, 2u32.try_into().unwrap());
    svstnt1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u16 || val == i as u16);
    }
    svsetffr();
    let loaded =
        svldnt1uh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
    assert_vector_matches_u32(
        loaded,
        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64offset form, s64 lanes, i8 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1ub_gather_u64offset_s64_with_svstnt1b_scatter_u64offset_s64() {
    let mut storage = [0 as i8; 1280usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
    svstnt1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i8 || val == i as i8);
    }
    svsetffr();
    let loaded =
        svldnt1ub_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64offset, s64 lanes, i16 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1uh_gather_u64offset_s64_with_svstnt1h_scatter_u64offset_s64() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
    svstnt1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded =
        svldnt1uh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64offset, s64 lanes, i32 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1uw_gather_u64offset_s64_with_svstnt1w_scatter_u64offset_s64() {
    let mut storage = [0 as i32; 320usize];
    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
    svstnt1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i32 || val == i as i32);
    }
    svsetffr();
    let loaded =
        svldnt1uw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
    assert_vector_matches_i64(
        loaded,
        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64offset, u64 lanes, u8 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1ub_gather_u64offset_u64_with_svstnt1b_scatter_u64offset_u64() {
    let mut storage = [0 as u8; 1280usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
    svstnt1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u8 || val == i as u8);
    }
    svsetffr();
    let loaded =
        svldnt1ub_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64offset, u64 lanes, u16 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1uh_gather_u64offset_u64_with_svstnt1h_scatter_u64offset_u64() {
    let mut storage = [0 as u16; 640usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
    svstnt1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u16 || val == i as u16);
    }
    svsetffr();
    let loaded =
        svldnt1uh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u64offset, u64 lanes, u32 storage.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1uw_gather_u64offset_u64_with_svstnt1w_scatter_u64offset_u64() {
    let mut storage = [0 as u32; 320usize];
    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
    svstnt1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as u32 || val == i as u32);
    }
    svsetffr();
    let loaded =
        svldnt1uw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
    assert_vector_matches_u64(
        loaded,
        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u32base_offset form: per-lane 32-bit bases plus scalar (base pointer + 1 byte),
// so the expected identity sequence starts at 1.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1ub_gather_u32base_offset_s32_with_svstnt1b_scatter_u32base_offset_s32() {
    let mut storage = [0 as i8; 1280usize];
    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svindex_u32(0, 1u32.try_into().unwrap());
    svstnt1b_scatter_u32base_offset_s32(
        svptrue_b8(),
        bases,
        storage.as_ptr() as i64 + 1u32 as i64,
        data,
    );
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i8 || val == i as i8);
    }
    svsetffr();
    let loaded = svldnt1ub_gather_u32base_offset_s32(
        svptrue_b8(),
        bases,
        storage.as_ptr() as i64 + 1u32 as i64,
    );
    assert_vector_matches_i32(
        loaded,
        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u32base_offset, s32 lanes, i16 storage, 2-byte step/offset.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1uh_gather_u32base_offset_s32_with_svstnt1h_scatter_u32base_offset_s32() {
    let mut storage = [0 as i16; 640usize];
    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svindex_u32(0, 2u32.try_into().unwrap());
    svstnt1h_scatter_u32base_offset_s32(
        svptrue_b16(),
        bases,
        storage.as_ptr() as i64 + 2u32 as i64,
        data,
    );
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i16 || val == i as i16);
    }
    svsetffr();
    let loaded = svldnt1uh_gather_u32base_offset_s32(
        svptrue_b16(),
        bases,
        storage.as_ptr() as i64 + 2u32 as i64,
    );
    assert_vector_matches_i32(
        loaded,
        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
    );
}
// u32base_offset, u32 lanes, i8 storage — continues past the end of this chunk.
#[simd_test(enable = "sve,sve2")]
unsafe fn test_svldnt1ub_gather_u32base_offset_u32_with_svstnt1b_scatter_u32base_offset_u32() {
    let mut storage = [0 as i8; 1280usize];
    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
    let bases = svindex_u32(0, 1u32.try_into().unwrap());
    svstnt1b_scatter_u32base_offset_u32(
        svptrue_b8(),
        bases,
        storage.as_ptr() as i64 + 1u32 as i64,
        data,
    );
    for (i, &val) in storage.iter().enumerate() {
        assert!(val == 0 as i8 || val == i as i8);
    }
    svsetffr();
    let loaded = svldnt1ub_gather_u32base_offset_u32(
        svptrue_b8(),
        bases,
        storage.as_ptr() as i64 + 1u32 as i64,
    );
    assert_vector_matches_u32(
+ loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u32base_offset_u32_with_svstnt1h_scatter_u32base_offset_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u64base_offset_s64_with_svstnt1b_scatter_u64base_offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svstnt1b_scatter_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1ub_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_offset_s64_with_svstnt1h_scatter_u64base_offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = 
svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_offset_s64_with_svstnt1w_scatter_u64base_offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u64base_offset_u64_with_svstnt1b_scatter_u64base_offset_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svstnt1b_scatter_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() 
{ + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1ub_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_offset_u64_with_svstnt1h_scatter_u64base_offset_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_offset_u64_with_svstnt1w_scatter_u64base_offset_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} 
+#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u64base_s64_with_svstnt1b_scatter_u64base_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svstnt1b_scatter_u64base_s64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1ub_gather_u64base_s64(svptrue_b8(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_s64_with_svstnt1h_scatter_u64base_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_s64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u64base_s64(svptrue_b16(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_s64_with_svstnt1w_scatter_u64base_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_s64(svptrue_b32(), 
bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1uw_gather_u64base_s64(svptrue_b32(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u64base_u64_with_svstnt1b_scatter_u64base_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svstnt1b_scatter_u64base_u64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1ub_gather_u64base_u64(svptrue_b8(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_u64_with_svstnt1h_scatter_u64base_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_u64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u64base_u64(svptrue_b16(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_u64_with_svstnt1w_scatter_u64base_u64() { + let mut storage = [0 as 
i32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_u64(svptrue_b32(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1uw_gather_u64base_u64(svptrue_b32(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_s64index_s64_with_svstnt1h_scatter_s64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_s64index_s64_with_svstnt1w_scatter_s64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 
1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_s64index_u64_with_svstnt1h_scatter_s64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_s64index_u64_with_svstnt1w_scatter_s64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64index_s64_with_svstnt1h_scatter_u64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u64index_s64(svptrue_b16(), 
storage.as_ptr() as *const u16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64index_s64_with_svstnt1w_scatter_u64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64index_u64_with_svstnt1h_scatter_u64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64index_u64_with_svstnt1w_scatter_u64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 
0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u32base_index_s32_with_svstnt1h_scatter_u32base_index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u32base_index_u32_with_svstnt1h_scatter_u32base_index_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_index_s64_with_svstnt1h_scatter_u64base_index_s64() { + 
let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_index_s64_with_svstnt1w_scatter_u64base_index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1uw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_index_u64_with_svstnt1h_scatter_u64base_index_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + 
svstnt1h_scatter_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_index_u64_with_svstnt1w_scatter_u64base_index_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1uw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} From a50a2e475455226b67117b1086df2bb3b6285730 Mon Sep 17 00:00:00 2001 From: Vadim Petrochenkov Date: Fri, 27 Mar 2026 16:35:35 +0300 Subject: [PATCH 40/64] resolve: Introduce `(Local,Extern)Module` newtypes for local and external modules respectively --- .../rustc_resolve/src/build_reduced_graph.rs | 79 +++++----- compiler/rustc_resolve/src/check_unused.rs | 2 +- compiler/rustc_resolve/src/diagnostics.rs | 10 +- compiler/rustc_resolve/src/ident.rs | 18 +-- compiler/rustc_resolve/src/imports.rs | 19 ++- compiler/rustc_resolve/src/late.rs | 18 +-- .../rustc_resolve/src/late/diagnostics.rs | 14 +- compiler/rustc_resolve/src/lib.rs | 142 +++++++++++++----- compiler/rustc_resolve/src/macros.rs | 4 +- 9 files changed, 197 insertions(+), 
109 deletions(-) diff --git a/compiler/rustc_resolve/src/build_reduced_graph.rs b/compiler/rustc_resolve/src/build_reduced_graph.rs index 50977ba6cff5f..ea3df1e6361bb 100644 --- a/compiler/rustc_resolve/src/build_reduced_graph.rs +++ b/compiler/rustc_resolve/src/build_reduced_graph.rs @@ -36,9 +36,9 @@ use crate::imports::{ImportData, ImportKind, OnUnknownData}; use crate::macros::{MacroRulesDecl, MacroRulesScope, MacroRulesScopeRef}; use crate::ref_mut::CmCell; use crate::{ - BindingKey, Decl, DeclData, DeclKind, ExternPreludeEntry, Finalize, IdentKey, MacroData, - Module, ModuleKind, ModuleOrUniformRoot, ParentScope, PathResult, ResolutionError, Resolver, - Segment, Used, VisResolutionError, errors, + BindingKey, Decl, DeclData, DeclKind, ExternModule, ExternPreludeEntry, Finalize, IdentKey, + LocalModule, MacroData, Module, ModuleKind, ModuleOrUniformRoot, ParentScope, PathResult, + ResolutionError, Resolver, Segment, Used, VisResolutionError, errors, }; type Res = def::Res; @@ -63,7 +63,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { /// Create a name definition from the given components, and put it into the local module. fn define_local( &mut self, - parent: Module<'ra>, + parent: LocalModule<'ra>, orig_ident: Ident, ns: Namespace, res: Res, @@ -71,7 +71,8 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { span: Span, expn_id: LocalExpnId, ) { - let decl = self.arenas.new_def_decl(res, vis.to_def_id(), span, expn_id, Some(parent)); + let decl = + self.arenas.new_def_decl(res, vis.to_def_id(), span, expn_id, Some(parent.to_module())); let ident = IdentKey::new(orig_ident); self.plant_decl_into_local_module(ident, orig_ident.span, ns, decl); } @@ -79,7 +80,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { /// Create a name definition from the given components, and put it into the extern module. 
fn define_extern( &self, - parent: Module<'ra>, + parent: ExternModule<'ra>, ident: IdentKey, orig_ident_span: Span, ns: Namespace, @@ -98,7 +99,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { vis: CmCell::new(vis), span, expansion, - parent_module: Some(parent), + parent_module: Some(parent.to_module()), }); // Even if underscore names cannot be looked up, we still need to add them to modules, // because they can be fetched by glob imports from those modules, and bring traits @@ -106,7 +107,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { let key = BindingKey::new_disambiguated(ident, ns, || (child_index + 1).try_into().unwrap()); // 0 indicates no underscore if self - .resolution_or_default(parent, key, orig_ident_span) + .resolution_or_default(parent.to_module(), key, orig_ident_span) .borrow_mut_unchecked() .non_glob_decl .replace(decl) @@ -150,30 +151,30 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { /// returns `None`. pub(crate) fn get_module(&self, def_id: DefId) -> Option> { match def_id.as_local() { - Some(local_def_id) => self.local_module_map.get(&local_def_id).copied(), + Some(local_def_id) => self.local_module_map.get(&local_def_id).map(|m| m.to_module()), None => { if let module @ Some(..) = self.extern_module_map.borrow().get(&def_id) { - return module.copied(); + return module.map(|m| m.to_module()); } // Query `def_kind` is not used because query system overhead is too expensive here. let def_kind = self.cstore().def_kind_untracked(self.tcx, def_id); if def_kind.is_module_like() { - let parent = self - .tcx - .opt_parent(def_id) - .map(|parent_id| self.get_nearest_non_block_module(parent_id)); + let parent = self.tcx.opt_parent(def_id).map(|parent_id| { + self.get_nearest_non_block_module(parent_id).expect_extern() + }); // Query `expn_that_defined` is not used because // hashing spans in its result is expensive. 
let expn_id = self.cstore().expn_that_defined_untracked(self.tcx, def_id); - return Some(self.new_extern_module( + let module = self.new_extern_module( parent, ModuleKind::Def(def_kind, def_id, Some(self.tcx.item_name(def_id))), expn_id, self.def_span(def_id), // FIXME: Account for `#[no_implicit_prelude]` attributes. parent.is_some_and(|module| module.no_implicit_prelude), - )); + ); + return Some(module.to_module()); } None @@ -187,13 +188,14 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { None => expn_id .as_local() .and_then(|expn_id| self.ast_transform_scopes.get(&expn_id).copied()) - .unwrap_or(self.graph_root), + .unwrap_or(self.graph_root) + .to_module(), } } pub(crate) fn macro_def_scope(&self, def_id: DefId) -> Module<'ra> { if let Some(id) = def_id.as_local() { - self.local_macro_def_scopes[&id] + self.local_macro_def_scopes[&id].to_module() } else { self.get_nearest_non_block_module(def_id) } @@ -247,10 +249,10 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { visitor.parent_scope.macro_rules } - pub(crate) fn build_reduced_graph_external(&self, module: Module<'ra>) { + pub(crate) fn build_reduced_graph_external(&self, module: ExternModule<'ra>) { let def_id = module.def_id(); let children = self.tcx.module_children(def_id); - let parent_scope = ParentScope::module(module, self.arenas); + let parent_scope = ParentScope::module(module.to_module(), self.arenas); for (i, child) in children.iter().enumerate() { self.build_reduced_graph_for_external_crate_res(child, parent_scope, i, None) } @@ -274,7 +276,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { child_index: usize, ambig_child: Option<&ModChild>, ) { - let parent = parent_scope.module; + let parent = parent_scope.module.expect_extern(); let child_span = |this: &Self, reexport_chain: &[Reexport], res: def::Res<_>| { this.def_span( reexport_chain @@ -292,7 +294,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { let ModChild { ident: _, res, vis, ref reexport_chain } = *ambig_child; let span = child_span(self, reexport_chain, res); 
let res = res.expect_non_local(); - self.arenas.new_def_decl(res, vis, span, expansion, Some(parent)) + self.arenas.new_def_decl(res, vis, span, expansion, Some(parent.to_module())) }); // Record primary definitions. @@ -802,7 +804,7 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> { adt_span: Span, ) { let parent_scope = &self.parent_scope; - let parent = parent_scope.module; + let parent = parent_scope.module.expect_local(); let expansion = parent_scope.expansion; // Define a name in the type namespace if it is not anonymous. @@ -818,7 +820,7 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> { /// Constructs the reduced graph for one item. fn build_reduced_graph_for_item(&mut self, item: &'a Item) { let parent_scope = &self.parent_scope; - let parent = parent_scope.module; + let parent = parent_scope.module.expect_local(); let expansion = parent_scope.expansion; let sp = item.span; let vis = self.resolve_visibility(&item.vis); @@ -863,7 +865,7 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> { { self.r.mods_with_parse_errors.insert(def_id); } - self.parent_scope.module = self.r.new_local_module( + let module = self.r.new_local_module( Some(parent), ModuleKind::Def(def_kind, def_id, Some(ident.name)), expansion.to_expn_id(), @@ -871,6 +873,7 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> { parent.no_implicit_prelude || ast::attr::contains_name(&item.attrs, sym::no_implicit_prelude), ); + self.parent_scope.module = module.to_module(); } // These items live in the value namespace. @@ -896,13 +899,14 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> { ItemKind::Enum(ident, _, _) | ItemKind::Trait(box ast::Trait { ident, .. 
}) => { self.r.define_local(parent, ident, TypeNS, res, vis, sp, expansion); - self.parent_scope.module = self.r.new_local_module( + let module = self.r.new_local_module( Some(parent), ModuleKind::Def(def_kind, def_id, Some(ident.name)), expansion.to_expn_id(), item.span, parent.no_implicit_prelude, ); + self.parent_scope.module = module.to_module(); } // These items live in both the type and value namespaces. @@ -998,7 +1002,7 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> { self.r.dcx().emit_err(errors::ExternCrateSelfRequiresRenaming { span: sp }); return; } else if orig_name == Some(kw::SelfLower) { - Some(self.r.graph_root) + Some(self.r.graph_root.to_module()) } else { let tcx = self.r.tcx; let crate_id = self.r.cstore_mut().process_extern_crate( @@ -1039,7 +1043,7 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> { self.r.potentially_unused_imports.push(import); let import_decl = self.r.new_import_decl(decl, import); let ident = IdentKey::new(orig_ident); - if ident.name != kw::Underscore && parent == self.r.graph_root { + if ident.name != kw::Underscore && parent == self.r.graph_root.to_module() { // FIXME: this error is technically unnecessary now when extern prelude is split into // two scopes, remove it with lang team approval. if let Some(entry) = self.r.extern_prelude.get(&ident) @@ -1084,7 +1088,7 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> { ForeignItemKind::TyAlias(..) => TypeNS, ForeignItemKind::MacCall(..) 
=> unreachable!(), }; - let parent = self.parent_scope.module; + let parent = self.parent_scope.module.expect_local(); let expansion = self.parent_scope.expansion; let vis = self.resolve_visibility(&item.vis); self.r.define_local(parent, ident, ns, self.res(def_id), vis, item.span, expansion); @@ -1092,7 +1096,7 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> { } fn build_reduced_graph_for_block(&mut self, block: &Block) { - let parent = self.parent_scope.module; + let parent = self.parent_scope.module.expect_local(); let expansion = self.parent_scope.expansion; if self.block_needs_anonymous_module(block) { let module = self.r.new_local_module( @@ -1103,7 +1107,7 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> { parent.no_implicit_prelude, ); self.r.block_map.insert(block.id, module); - self.parent_scope.module = module; // Descend into the block. + self.parent_scope.module = module.to_module(); // Descend into the block. } } @@ -1303,7 +1307,7 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> { _ => unreachable!(), }; - self.r.local_macro_def_scopes.insert(def_id, parent_scope.module); + self.r.local_macro_def_scopes.insert(def_id, parent_scope.module.expect_local()); if macro_rules { let ident = IdentKey::new(orig_ident); @@ -1326,7 +1330,10 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> { let import = self.r.arenas.alloc_import(ImportData { kind: ImportKind::MacroExport, root_id: item.id, - parent_scope: ParentScope { module: self.r.graph_root, ..parent_scope }, + parent_scope: ParentScope { + module: self.r.graph_root.to_module(), + ..parent_scope + }, imported_module: CmCell::new(None), has_attributes: false, use_span_with_attributes: span, @@ -1357,7 +1364,7 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> { self.r.macro_rules_scopes.insert(def_id, scope); scope } else { - let module = parent_scope.module; + let module = parent_scope.module.expect_local(); let vis = match item.kind { // 
Visibilities must not be resolved non-speculatively twice // and we already resolved this one as a `fn` item visibility. @@ -1505,7 +1512,7 @@ impl<'a, 'ra, 'tcx> Visitor<'a> for BuildReducedGraphVisitor<'a, 'ra, 'tcx> { } if ctxt == AssocCtxt::Trait { - let parent = self.parent_scope.module; + let parent = self.parent_scope.module.expect_local(); let expansion = self.parent_scope.expansion; self.r.define_local(parent, ident, ns, self.res(def_id), vis, item.span, expansion); } else if !matches!(&item.kind, AssocItemKind::Delegation(deleg) if deleg.from_glob) @@ -1587,7 +1594,7 @@ impl<'a, 'ra, 'tcx> Visitor<'a> for BuildReducedGraphVisitor<'a, 'ra, 'tcx> { return; } - let parent = self.parent_scope.module; + let parent = self.parent_scope.module.expect_local(); let expn_id = self.parent_scope.expansion; let ident = variant.ident; diff --git a/compiler/rustc_resolve/src/check_unused.rs b/compiler/rustc_resolve/src/check_unused.rs index b5246808cd5ad..7e1b1bce3ff74 100644 --- a/compiler/rustc_resolve/src/check_unused.rs +++ b/compiler/rustc_resolve/src/check_unused.rs @@ -545,7 +545,7 @@ impl Resolver<'_, '_> { let unused_imports = visitor.unused_imports; let mut check_redundant_imports = FxIndexSet::default(); for module in &self.local_modules { - for (_key, resolution) in self.resolutions(*module).borrow().iter() { + for (_key, resolution) in self.resolutions(module.to_module()).borrow().iter() { if let Some(decl) = resolution.borrow().best_decl() && let DeclKind::Import { import, .. } = decl.kind && let ImportKind::Single { id, .. 
} = import.kind diff --git a/compiler/rustc_resolve/src/diagnostics.rs b/compiler/rustc_resolve/src/diagnostics.rs index 9e0f04e82b472..9436508f1934b 100644 --- a/compiler/rustc_resolve/src/diagnostics.rs +++ b/compiler/rustc_resolve/src/diagnostics.rs @@ -1574,7 +1574,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { lookup_ident, namespace, parent_scope, - self.graph_root, + self.graph_root.to_module(), crate_path, &filter_fn, ); @@ -2063,7 +2063,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { if kind != AmbiguityKind::GlobVsGlob { if let Scope::ModuleNonGlobs(module, _) | Scope::ModuleGlobs(module, _) = scope { - if module == self.graph_root { + if module == self.graph_root.to_module() { help_msgs.push(format!( "use `crate::{ident}` to refer to this {thing} unambiguously" )); @@ -2441,7 +2441,8 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { self.local_module_map .iter() .filter(|(_, module)| { - current_module.is_ancestor_of(**module) && current_module != **module + let module = module.to_module(); + current_module.is_ancestor_of(module) && current_module != module }) .flat_map(|(_, module)| module.kind.name()), ) @@ -2450,7 +2451,8 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { .borrow() .iter() .filter(|(_, module)| { - current_module.is_ancestor_of(**module) && current_module != **module + let module = module.to_module(); + current_module.is_ancestor_of(module) && current_module != module }) .flat_map(|(_, module)| module.kind.name()), ) diff --git a/compiler/rustc_resolve/src/ident.rs b/compiler/rustc_resolve/src/ident.rs index 46b4a3aa25864..3a9e61bf5ab58 100644 --- a/compiler/rustc_resolve/src/ident.rs +++ b/compiler/rustc_resolve/src/ident.rs @@ -24,9 +24,9 @@ use crate::late::{ use crate::macros::{MacroRulesScope, sub_namespace_match}; use crate::{ AmbiguityError, AmbiguityKind, AmbiguityWarning, BindingKey, CmResolver, Decl, DeclKind, - Determinacy, Finalize, IdentKey, ImportKind, LateDecl, Module, ModuleKind, ModuleOrUniformRoot, - ParentScope, PathResult, PrivacyError, Res, 
ResolutionError, Resolver, Scope, ScopeSet, - Segment, Stage, Symbol, Used, errors, + Determinacy, Finalize, IdentKey, ImportKind, LateDecl, LocalModule, Module, ModuleKind, + ModuleOrUniformRoot, ParentScope, PathResult, PrivacyError, Res, ResolutionError, Resolver, + Scope, ScopeSet, Segment, Stage, Symbol, Used, errors, }; #[derive(Copy, Clone)] @@ -346,7 +346,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { } else if let RibKind::Block(Some(module)) = rib.kind && let Ok(binding) = self.cm().resolve_ident_in_scope_set( ident, - ScopeSet::Module(ns, module), + ScopeSet::Module(ns, module.to_module()), parent_scope, finalize.map(|finalize| Finalize { used: Used::Scope, ..finalize }), ignore_decl, @@ -357,7 +357,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { return Some(LateDecl::Decl(binding)); } else if let RibKind::Module(module) = rib.kind { // Encountered a module item, abandon ribs and look into that module and preludes. - let parent_scope = &ParentScope { module, ..*parent_scope }; + let parent_scope = &ParentScope { module: module.to_module(), ..*parent_scope }; let finalize = finalize.map(|f| Finalize { stage: Stage::Late, ..f }); return self .cm() @@ -658,7 +658,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { ) }; let binding = self.reborrow().resolve_ident_in_module_globs_unadjusted( - module, + module.expect_local(), ident, orig_ident_span, ns, @@ -1123,7 +1123,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { /// Attempts to resolve `ident` in namespace `ns` of glob bindings in `module`. fn resolve_ident_in_module_globs_unadjusted<'r>( mut self: CmResolver<'r, 'ra, 'tcx>, - module: Module<'ra>, + module: LocalModule<'ra>, ident: IdentKey, orig_ident_span: Span, ns: Namespace, @@ -1138,7 +1138,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { // doesn't need to be mutable. It will fail when there is a cycle of imports, and without // the exclusive access infinite recursion will crash the compiler with stack overflow. 
let resolution = &*self - .resolution_or_default(module, key, orig_ident_span) + .resolution_or_default(module.to_module(), key, orig_ident_span) .try_borrow_mut_unchecked() .map_err(|_| ControlFlow::Continue(Determined))?; @@ -1150,7 +1150,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { orig_ident_span, binding, parent_scope, - module, + module.to_module(), finalize, shadowing, ); diff --git a/compiler/rustc_resolve/src/imports.rs b/compiler/rustc_resolve/src/imports.rs index e24e65d55c00f..89d6fee0d3ec9 100644 --- a/compiler/rustc_resolve/src/imports.rs +++ b/compiler/rustc_resolve/src/imports.rs @@ -37,7 +37,7 @@ use crate::errors::{ use crate::ref_mut::CmCell; use crate::{ AmbiguityError, BindingKey, CmResolver, Decl, DeclData, DeclKind, Determinacy, Finalize, - IdentKey, ImportSuggestion, Module, ModuleOrUniformRoot, ParentScope, PathResult, PerNS, + IdentKey, ImportSuggestion, LocalModule, ModuleOrUniformRoot, ParentScope, PathResult, PerNS, ResolutionError, Resolver, ScopeSet, Segment, Used, module_to_string, names_to_string, }; @@ -466,7 +466,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { decl: Decl<'ra>, warn_ambiguity: bool, ) -> Result<(), Decl<'ra>> { - let module = decl.parent_module.unwrap(); + let module = decl.parent_module.unwrap().expect_local(); let res = decl.res(); self.check_reserved_macro_name(ident.name, orig_ident_span, res); // Even if underscore names cannot be looked up, we still need to add them to modules, @@ -528,7 +528,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { // If the resolution becomes a success, define it in the module's glob importers. fn update_local_resolution( &mut self, - module: Module<'ra>, + module: LocalModule<'ra>, key: BindingKey, orig_ident_span: Span, warn_ambiguity: bool, @@ -541,7 +541,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { // during which the resolution might end up getting re-defined via a glob cycle. 
let (binding, t, warn_ambiguity) = { let resolution = &mut *self - .resolution_or_default(module, key, orig_ident_span) + .resolution_or_default(module.to_module(), key, orig_ident_span) .borrow_mut_unchecked(); let old_decl = resolution.binding(); @@ -596,7 +596,6 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { let dummy_decl = self.dummy_decl; let dummy_decl = self.new_import_decl(dummy_decl, import); self.per_ns(|this, ns| { - let module = import.parent_scope.module; let ident = IdentKey::new(target); let _ = this.try_plant_decl_into_local_module( ident, @@ -609,7 +608,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { if target.name != kw::Underscore { let key = BindingKey::new(ident, ns); this.update_local_resolution( - module, + import.parent_scope.module.expect_local(), key, target.span, false, @@ -747,7 +746,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { pub(crate) fn lint_reexports(&mut self, exported_ambiguities: FxHashSet>) { for module in &self.local_modules { - for (key, resolution) in self.resolutions(*module).borrow().iter() { + for (key, resolution) in self.resolutions(module.to_module()).borrow().iter() { let resolution = resolution.borrow(); let Some(binding) = resolution.best_decl() else { continue }; @@ -1040,7 +1039,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { if target.name != kw::Underscore { let key = BindingKey::new(IdentKey::new(target), ns); this.get_mut_unchecked().update_local_resolution( - parent, + parent.expect_local(), key, target.span, false, @@ -1712,7 +1711,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { // reporting conflicts, and reporting unresolved imports. 
fn finalize_resolutions_in( &self, - module: Module<'ra>, + module: LocalModule<'ra>, module_children: &mut LocalDefIdMap>, ambig_module_children: &mut LocalDefIdMap>, ) { @@ -1724,7 +1723,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { let mut children = Vec::new(); let mut ambig_children = Vec::new(); - module.for_each_child(self, |this, ident, orig_ident_span, _, binding| { + module.to_module().for_each_child(self, |this, ident, orig_ident_span, _, binding| { let res = binding.res().expect_non_local(); if res != def::Res::Err { let ident = ident.orig(orig_ident_span); diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs index 2c287045be7a9..157600c926e9f 100644 --- a/compiler/rustc_resolve/src/late.rs +++ b/compiler/rustc_resolve/src/late.rs @@ -40,9 +40,9 @@ use thin_vec::ThinVec; use tracing::{debug, instrument, trace}; use crate::{ - BindingError, BindingKey, Decl, DelegationFnSig, Finalize, IdentKey, LateDecl, Module, - ModuleOrUniformRoot, ParentScope, PathResult, ResolutionError, Resolver, Segment, Stage, - TyCtxt, UseError, Used, errors, path_names_to_string, rustdoc, + BindingError, BindingKey, Decl, DelegationFnSig, Finalize, IdentKey, LateDecl, LocalModule, + Module, ModuleOrUniformRoot, ParentScope, PathResult, ResolutionError, Resolver, Segment, + Stage, TyCtxt, UseError, Used, errors, path_names_to_string, rustdoc, }; mod diagnostics; @@ -198,7 +198,7 @@ pub(crate) enum RibKind<'ra> { /// `Block(None)` must be always processed in the same way as `Block(Some(module))` /// with empty `module`. The module can be `None` only because creation of some definitely /// empty modules is skipped as an optimization. - Block(Option>), + Block(Option>), /// We passed through an impl or trait and are now in one of its /// methods or associated types. 
Allow references to ty params that impl or trait @@ -219,7 +219,7 @@ pub(crate) enum RibKind<'ra> { ConstantItem(ConstantHasGenerics, Option<(Ident, ConstantItemKind)>), /// We passed through a module item. - Module(Module<'ra>), + Module(LocalModule<'ra>), /// We passed through a `macro_rules!` statement MacroDefinition(DefId), @@ -1484,7 +1484,7 @@ impl<'a, 'ast, 'ra, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { // During late resolution we only track the module component of the parent scope, // although it may be useful to track other components as well for diagnostics. let graph_root = resolver.graph_root; - let parent_scope = ParentScope::module(graph_root, resolver.arenas); + let parent_scope = ParentScope::module(graph_root.to_module(), resolver.arenas); let start_rib_kind = RibKind::Module(graph_root); LateResolutionVisitor { r: resolver, @@ -2886,8 +2886,8 @@ impl<'a, 'ast, 'ra, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { ItemKind::Mod(..) => { let module = self.r.expect_module(self.r.local_def_id(item.id).to_def_id()); let orig_module = replace(&mut self.parent_scope.module, module); - self.with_rib(ValueNS, RibKind::Module(module), |this| { - this.with_rib(TypeNS, RibKind::Module(module), |this| { + self.with_rib(ValueNS, RibKind::Module(module.expect_local()), |this| { + this.with_rib(TypeNS, RibKind::Module(module.expect_local()), |this| { if mod_inner_docs { this.resolve_doc_links(&item.attrs, MaybeExported::Ok(item.id)); } @@ -5019,7 +5019,7 @@ impl<'a, 'ast, 'ra, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { debug!("(resolving block) found anonymous module, moving down"); self.ribs[ValueNS].push(Rib::new(RibKind::Block(Some(anonymous_module)))); self.ribs[TypeNS].push(Rib::new(RibKind::Block(Some(anonymous_module)))); - self.parent_scope.module = anonymous_module; + self.parent_scope.module = anonymous_module.to_module(); } else { self.ribs[ValueNS].push(Rib::new(RibKind::Block(None))); } diff --git 
a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs index 467f03fa46fdd..cbb3c9eb570cf 100644 --- a/compiler/rustc_resolve/src/late/diagnostics.rs +++ b/compiler/rustc_resolve/src/late/diagnostics.rs @@ -1131,7 +1131,7 @@ impl<'ast, 'ra, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { for rib in self.ribs[ns].iter().rev() { let item = path[0].ident; if let RibKind::Module(module) | RibKind::Block(Some(module)) = rib.kind - && let Some(did) = find_doc_alias_name(self.r, module, item.name) + && let Some(did) = find_doc_alias_name(self.r, module.to_module(), item.name) { return Some((did, item)); } @@ -2903,10 +2903,16 @@ impl<'ast, 'ra, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { } if let RibKind::Block(Some(module)) = rib.kind { - self.r.add_module_candidates(module, &mut names, &filter_fn, Some(ctxt)); + self.r.add_module_candidates( + module.to_module(), + &mut names, + &filter_fn, + Some(ctxt), + ); } else if let RibKind::Module(module) = rib.kind { // Encountered a module item, abandon ribs and look into that module and preludes. 
- let parent_scope = &ParentScope { module, ..self.parent_scope }; + let parent_scope = + &ParentScope { module: module.to_module(), ..self.parent_scope }; self.r.add_scope_set_candidates( &mut names, ScopeSet::All(ns), @@ -3049,7 +3055,7 @@ impl<'ast, 'ra, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { let mut seen_modules = FxHashSet::default(); let root_did = self.r.graph_root.def_id(); let mut worklist = vec![( - self.r.graph_root, + self.r.graph_root.to_module(), ThinVec::new(), root_did.is_local() || !self.r.tcx.is_doc_hidden(root_did), )]; diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs index d75f2981a7724..902cca7340e85 100644 --- a/compiler/rustc_resolve/src/lib.rs +++ b/compiler/rustc_resolve/src/lib.rs @@ -686,6 +686,16 @@ struct ModuleData<'ra> { #[rustc_pass_by_value] struct Module<'ra>(Interned<'ra, ModuleData<'ra>>); +/// Same as `Module`, but is guaranteed to be from the current crate. +#[derive(Clone, Copy, PartialEq, Eq, Hash)] +#[rustc_pass_by_value] +struct LocalModule<'ra>(Interned<'ra, ModuleData<'ra>>); + +/// Same as `Module`, but is guaranteed to be from an external crate. +#[derive(Clone, Copy, PartialEq, Eq, Hash)] +#[rustc_pass_by_value] +struct ExternModule<'ra>(Interned<'ra, ModuleData<'ra>>); + // Allows us to use Interned without actually enforcing (via Hash/PartialEq/...) uniqueness of the // contained data. 
// FIXME: We may wish to actually have at least debug-level assertions that Interned's guarantees @@ -728,6 +738,21 @@ impl<'ra> ModuleData<'ra> { self_decl, } } + + fn opt_def_id(&self) -> Option { + self.kind.opt_def_id() + } + + fn def_id(&self) -> DefId { + self.opt_def_id().expect("`ModuleData::def_id` is called on a block module") + } + + fn res(&self) -> Option { + match self.kind { + ModuleKind::Def(kind, def_id, _) => Some(Res::Def(kind, def_id)), + _ => None, + } + } } impl<'ra> Module<'ra> { @@ -779,21 +804,6 @@ impl<'ra> Module<'ra> { } } - fn res(self) -> Option { - match self.kind { - ModuleKind::Def(kind, def_id, _) => Some(Res::Def(kind, def_id)), - _ => None, - } - } - - fn def_id(self) -> DefId { - self.opt_def_id().expect("`ModuleData::def_id` is called on a block module") - } - - fn opt_def_id(self) -> Option { - self.kind.opt_def_id() - } - // `self` resolves to the first module ancestor that `is_normal`. fn is_normal(self) -> bool { matches!(self.kind, ModuleKind::Def(DefKind::Mod, _, _)) @@ -831,6 +841,38 @@ impl<'ra> Module<'ra> { } true } + + #[track_caller] + fn expect_local(self) -> LocalModule<'ra> { + match self.kind { + ModuleKind::Def(_, def_id, _) if !def_id.is_local() => { + panic!("`Module::expect_local` is called on a non-local module: {self:?}") + } + ModuleKind::Def(..) | ModuleKind::Block => LocalModule(self.0), + } + } + + #[track_caller] + fn expect_extern(self) -> ExternModule<'ra> { + match self.kind { + ModuleKind::Def(_, def_id, _) if !def_id.is_local() => ExternModule(self.0), + ModuleKind::Def(..) 
| ModuleKind::Block => { + panic!("`Module::expect_extern` is called on a local module: {self:?}") + } + } + } +} + +impl<'ra> LocalModule<'ra> { + fn to_module(self) -> Module<'ra> { + Module(self.0) + } +} + +impl<'ra> ExternModule<'ra> { + fn to_module(self) -> Module<'ra> { + Module(self.0) + } } impl<'ra> std::ops::Deref for Module<'ra> { @@ -841,6 +883,22 @@ impl<'ra> std::ops::Deref for Module<'ra> { } } +impl<'ra> std::ops::Deref for LocalModule<'ra> { + type Target = ModuleData<'ra>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<'ra> std::ops::Deref for ExternModule<'ra> { + type Target = ModuleData<'ra>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + impl<'ra> fmt::Debug for Module<'ra> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.kind { @@ -850,6 +908,12 @@ impl<'ra> fmt::Debug for Module<'ra> { } } +impl<'ra> fmt::Debug for LocalModule<'ra> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.to_module().fmt(f) + } +} + /// Data associated with any name declaration. #[derive(Clone, Debug)] struct DeclData<'ra> { @@ -1195,7 +1259,7 @@ pub struct Resolver<'ra, 'tcx> { /// Item with a given `LocalDefId` was defined during macro expansion with ID `ExpnId`. expn_that_defined: UnordMap = Default::default(), - graph_root: Module<'ra>, + graph_root: LocalModule<'ra>, /// Assert that we are in speculative resolution mode. assert_speculative: bool, @@ -1254,17 +1318,17 @@ pub struct Resolver<'ra, 'tcx> { /// /// There will be an anonymous module created around `g` with the ID of the /// entry block for `f`. - block_map: NodeMap> = Default::default(), + block_map: NodeMap> = Default::default(), /// A fake module that contains no definition and no prelude. Used so that /// some AST passes can generate identifiers that only resolve to local or /// lang items. - empty_module: Module<'ra>, + empty_module: LocalModule<'ra>, /// All local modules, including blocks. 
- local_modules: Vec>, + local_modules: Vec>, /// Eagerly populated map of all local non-block modules. - local_module_map: FxIndexMap>, + local_module_map: FxIndexMap>, /// Lazily populated cache of modules loaded from external crates. - extern_module_map: CacheRefCell>>, + extern_module_map: CacheRefCell>>, /// Maps glob imports to the names of items actually imported. glob_map: FxIndexMap>, @@ -1304,8 +1368,8 @@ pub struct Resolver<'ra, 'tcx> { dummy_ext_bang: Arc, dummy_ext_derive: Arc, non_macro_attr: &'ra MacroData, - local_macro_def_scopes: FxHashMap> = default::fx_hash_map(), - ast_transform_scopes: FxHashMap> = default::fx_hash_map(), + local_macro_def_scopes: FxHashMap> = default::fx_hash_map(), + ast_transform_scopes: FxHashMap> = default::fx_hash_map(), unused_macros: FxIndexMap, /// A map from the macro to all its potentially unused arms. unused_macro_rules: FxIndexMap>, @@ -1653,6 +1717,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { crate_span, attr::contains_name(attrs, sym::no_implicit_prelude), ); + let graph_root = graph_root.expect_local(); let local_modules = vec![graph_root]; let local_module_map = FxIndexMap::from_iter([(CRATE_DEF_ID, graph_root)]); let empty_module = arenas.new_module( @@ -1663,6 +1728,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { DUMMY_SP, true, ); + let empty_module = empty_module.expect_local(); let mut node_id_to_def_id = NodeMap::default(); let crate_feed = tcx.create_local_crate_def_id(crate_span); @@ -1745,7 +1811,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { .. 
}; - let root_parent_scope = ParentScope::module(graph_root, resolver.arenas); + let root_parent_scope = ParentScope::module(graph_root.to_module(), resolver.arenas); resolver.invocation_parent_scopes.insert(LocalExpnId::ROOT, root_parent_scope); resolver.feed_visibility(crate_feed, Visibility::Public); @@ -1754,15 +1820,19 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { fn new_local_module( &mut self, - parent: Option>, + parent: Option>, kind: ModuleKind, expn_id: ExpnId, span: Span, no_implicit_prelude: bool, - ) -> Module<'ra> { + ) -> LocalModule<'ra> { + let parent = parent.map(|m| m.to_module()); let vis = kind.opt_def_id().map_or(Visibility::Public, |def_id| self.tcx.visibility(def_id)); - let module = self.arenas.new_module(parent, kind, vis, expn_id, span, no_implicit_prelude); + let module = self + .arenas + .new_module(parent, kind, vis, expn_id, span, no_implicit_prelude) + .expect_local(); self.local_modules.push(module); if let Some(def_id) = module.opt_def_id() { self.local_module_map.insert(def_id.expect_local(), module); @@ -1772,15 +1842,19 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { fn new_extern_module( &self, - parent: Option>, + parent: Option>, kind: ModuleKind, expn_id: ExpnId, span: Span, no_implicit_prelude: bool, - ) -> Module<'ra> { + ) -> ExternModule<'ra> { + let parent = parent.map(|m| m.to_module()); let vis = kind.opt_def_id().map_or(Visibility::Public, |def_id| self.tcx.visibility(def_id)); - let module = self.arenas.new_module(parent, kind, vis, expn_id, span, no_implicit_prelude); + let module = self + .arenas + .new_module(parent, kind, vis, expn_id, span, no_implicit_prelude) + .expect_extern(); self.extern_module_map.borrow_mut().insert(module.def_id(), module); module } @@ -2066,7 +2140,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { fn resolutions(&self, module: Module<'ra>) -> &'ra Resolutions<'ra> { if module.populate_on_access.get() { module.populate_on_access.set(false); - self.build_reduced_graph_external(module); + 
self.build_reduced_graph_external(module.expect_extern()); } &module.0.0.lazy_resolutions } @@ -2139,7 +2213,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { // Do not report the lint if the macro name resolves in stdlib prelude // even without the problematic `macro_use` import. let found_in_stdlib_prelude = self.prelude.is_some_and(|prelude| { - let empty_module = self.empty_module; + let empty_module = self.empty_module.to_module(); let arenas = self.arenas; self.cm() .maybe_resolve_ident_in_module( @@ -2251,7 +2325,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { "resolve_crate_root({:?}): found no mark (ident.span = {:?})", ident, ident.span ); - return self.graph_root; + return self.graph_root.to_module(); } }; let module = self.expect_module( @@ -2506,7 +2580,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { return; } - let module = self.graph_root; + let module = self.graph_root.to_module(); let ident = Ident::with_dummy_span(sym::main); let parent_scope = &ParentScope::module(module, self.arenas); diff --git a/compiler/rustc_resolve/src/macros.rs b/compiler/rustc_resolve/src/macros.rs index 67a896bdd7557..3b56a192664f4 100644 --- a/compiler/rustc_resolve/src/macros.rs +++ b/compiler/rustc_resolve/src/macros.rs @@ -242,8 +242,8 @@ impl<'ra, 'tcx> ResolverExpand for Resolver<'ra, 'tcx> { ) }); - let parent_scope = - parent_module.map_or(self.empty_module, |def_id| self.expect_module(def_id)); + let parent_scope = parent_module + .map_or(self.empty_module, |def_id| self.expect_module(def_id).expect_local()); self.ast_transform_scopes.insert(expn_id, parent_scope); expn_id From 6a6e8446b97e8a3dfc0984660253b1ac437a445a Mon Sep 17 00:00:00 2001 From: David Wood Date: Sat, 28 Feb 2026 21:24:33 +0000 Subject: [PATCH 41/64] intrinsics_data: add sve intrinsics Co-authored-by: Adam Gemmell Co-authored-by: Jamie Cunliffe Co-authored-by: Jacob Bramley Co-authored-by: Luca Vizzarro --- .../intrinsics_data/arm_intrinsics.json | 270972 +++++++++++++-- 1 file changed, 238271 insertions(+), 
32701 deletions(-) diff --git a/library/stdarch/intrinsics_data/arm_intrinsics.json b/library/stdarch/intrinsics_data/arm_intrinsics.json index bce85d19a10f1..3a3b962a48730 100644 --- a/library/stdarch/intrinsics_data/arm_intrinsics.json +++ b/library/stdarch/intrinsics_data/arm_intrinsics.json @@ -224,21 +224,25 @@ ] }, { - "SIMD_ISA": "Neon", - "name": "vscale_f16", + "SIMD_ISA": "SVE2", + "name": "svaba[_n_s16]", "arguments": [ - "float16x4_t a", - "int16x4_t b" + "svint16_t op1", + "svint16_t op2", + "int16_t op3" ], "return_type": { - "value": "float16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ @@ -246,26 +250,34 @@ ], "instructions": [ [ - "FSCALE" + "SABA" + ], + [ + "MOVPRFX", + "SABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vscaleq_f16", + "SIMD_ISA": "SVE2", + "name": "svaba[_n_s32]", "arguments": [ - "float16x8_t a", - "int16x8_t b" + "svint32_t op1", + "svint32_t op2", + "int32_t op3" ], "return_type": { - "value": "float16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ @@ -273,26 +285,34 @@ ], "instructions": [ [ - "FSCALE" + "SABA" + ], + [ + "MOVPRFX", + "SABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vscale_f32", + "SIMD_ISA": "SVE2", + "name": "svaba[_n_s64]", "arguments": [ - "float32x2_t a", - "int32x2_t b" + "svint64_t op1", + "svint64_t op2", + "int64_t op3" ], "return_type": { - "value": "float32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.D" + }, + "op3": 
{ + "register": "Zop3.D[*]" } }, "Architectures": [ @@ -300,26 +320,34 @@ ], "instructions": [ [ - "FSCALE" + "SABA" + ], + [ + "MOVPRFX", + "SABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vscaleq_f32", + "SIMD_ISA": "SVE2", + "name": "svaba[_n_s8]", "arguments": [ - "float32x4_t a", - "int32x4_t b" + "svint8_t op1", + "svint8_t op2", + "int8_t op3" ], "return_type": { - "value": "float32x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ @@ -327,26 +355,34 @@ ], "instructions": [ [ - "FSCALE" + "SABA" + ], + [ + "MOVPRFX", + "SABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vscaleq_f64", + "SIMD_ISA": "SVE2", + "name": "svaba[_n_u16]", "arguments": [ - "float64x2_t a", - "int64x2_t b" + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" ], "return_type": { - "value": "float64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ @@ -354,228 +390,244 @@ ], "instructions": [ [ - "FSCALE" + "UABA" + ], + [ + "MOVPRFX", + "UABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaba_s16", + "SIMD_ISA": "SVE2", + "name": "svaba[_n_u32]", "arguments": [ - "int16x4_t a", - "int16x4_t b", - "int16x4_t c" + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" ], "return_type": { - "value": "int16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.S" }, - "c": { - "register": "Vm.4H" + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABA" + "UABA" + ], + 
[ + "MOVPRFX", + "UABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaba_s32", + "SIMD_ISA": "SVE2", + "name": "svaba[_n_u64]", "arguments": [ - "int32x2_t a", - "int32x2_t b", - "int32x2_t c" + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" ], "return_type": { - "value": "int32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.D" }, - "c": { - "register": "Vm.2S" + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABA" + "UABA" + ], + [ + "MOVPRFX", + "UABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaba_s8", + "SIMD_ISA": "SVE2", + "name": "svaba[_n_u8]", "arguments": [ - "int8x8_t a", - "int8x8_t b", - "int8x8_t c" + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" ], "return_type": { - "value": "int8x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.B" }, - "c": { - "register": "Vm.8B" + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABA" + "UABA" + ], + [ + "MOVPRFX", + "UABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaba_u16", + "SIMD_ISA": "SVE2", + "name": "svaba[_s16]", "arguments": [ - "uint16x4_t a", - "uint16x4_t b", - "uint16x4_t c" + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" ], "return_type": { - "value": "uint16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.H" }, - "c": { - "register": "Vm.4H" + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABA" + "SABA" + ], + [ + "MOVPRFX", + "SABA" ] ] }, { - "SIMD_ISA": 
"Neon", - "name": "vaba_u32", + "SIMD_ISA": "SVE2", + "name": "svaba[_s32]", "arguments": [ - "uint32x2_t a", - "uint32x2_t b", - "uint32x2_t c" + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" ], "return_type": { - "value": "uint32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.S" }, - "c": { - "register": "Vm.2S" + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABA" + "SABA" + ], + [ + "MOVPRFX", + "SABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaba_u8", + "SIMD_ISA": "SVE2", + "name": "svaba[_s64]", "arguments": [ - "uint8x8_t a", - "uint8x8_t b", - "uint8x8_t c" + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" ], "return_type": { - "value": "uint8x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D" }, - "c": { - "register": "Vm.8B" + "op3": { + "register": "Zop3.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABA" + "SABA" + ], + [ + "MOVPRFX", + "SABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_high_s16", + "SIMD_ISA": "SVE2", + "name": "svaba[_s8]", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x8_t c" + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" ], "return_type": { - "value": "int32x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.B" }, - "c": { - "register": "Vm.8H" + "op3": { + "register": "Zop3.B" } }, "Architectures": [ @@ -583,30 +635,34 @@ ], "instructions": [ [ - "SABAL2" + "SABA" + ], + [ + "MOVPRFX", + "SABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_high_s32", + "SIMD_ISA": 
"SVE2", + "name": "svaba[_u16]", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x4_t c" + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" ], "return_type": { - "value": "int64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.H" }, - "c": { - "register": "Vm.4S" + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -614,30 +670,34 @@ ], "instructions": [ [ - "SABAL2" + "UABA" + ], + [ + "MOVPRFX", + "UABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_high_s8", + "SIMD_ISA": "SVE2", + "name": "svaba[_u32]", "arguments": [ - "int16x8_t a", - "int8x16_t b", - "int8x16_t c" + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" ], "return_type": { - "value": "int16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.S" }, - "c": { - "register": "Vm.16B" + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ -645,30 +705,34 @@ ], "instructions": [ [ - "SABAL2" + "UABA" + ], + [ + "MOVPRFX", + "UABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_high_u16", + "SIMD_ISA": "SVE2", + "name": "svaba[_u64]", "arguments": [ - "uint32x4_t a", - "uint16x8_t b", - "uint16x8_t c" + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.D" }, - "c": { - "register": "Vm.8H" + "op3": { + "register": "Zop3.D" } }, "Architectures": [ @@ -676,30 +740,34 @@ ], "instructions": [ [ - "UABAL2" + "UABA" + ], + [ + "MOVPRFX", + "UABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_high_u32", + "SIMD_ISA": "SVE2", + "name": 
"svaba[_u8]", "arguments": [ - "uint64x2_t a", - "uint32x4_t b", - "uint32x4_t c" + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.B" }, - "c": { - "register": "Vm.4S" + "op3": { + "register": "Zop3.B" } }, "Architectures": [ @@ -707,30 +775,34 @@ ], "instructions": [ [ - "UABAL2" + "UABA" + ], + [ + "MOVPRFX", + "UABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_high_u8", + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_s16]", "arguments": [ - "uint16x8_t a", - "uint8x16_t b", - "uint8x16_t c" + "svint16_t op1", + "svint8_t op2", + "int8_t op3" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B" }, - "c": { - "register": "Vm.16B" + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ @@ -738,479 +810,524 @@ ], "instructions": [ [ - "UABAL2" + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_s16", + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_s32]", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x4_t c" + "svint32_t op1", + "svint16_t op2", + "int16_t op3" ], "return_type": { - "value": "int32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.H" }, - "c": { - "register": "Vm.4H" + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABAL" + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_s32", + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_s64]", 
"arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x2_t c" + "svint64_t op1", + "svint32_t op2", + "int32_t op3" ], "return_type": { - "value": "int64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.S" }, - "c": { - "register": "Vm.2S" + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABAL" + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_s8", + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_u16]", "arguments": [ - "int16x8_t a", - "int8x8_t b", - "int8x8_t c" + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" ], "return_type": { - "value": "int16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.B" }, - "c": { - "register": "Vm.8B" + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABAL" + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_u16", + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_u32]", "arguments": [ - "uint32x4_t a", - "uint16x4_t b", - "uint16x4_t c" + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.H" }, - "c": { - "register": "Vm.4H" + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABAL" + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_u32", + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_u64]", "arguments": [ - 
"uint64x2_t a", - "uint32x2_t b", - "uint32x2_t c" + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.S" }, - "c": { - "register": "Vm.2S" + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABAL" + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_u8", + "SIMD_ISA": "SVE2", + "name": "svabalb[_s16]", "arguments": [ - "uint16x8_t a", - "uint8x8_t b", - "uint8x8_t c" + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.B" }, - "c": { - "register": "Vm.8B" + "op3": { + "register": "Zop3.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABAL" + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabaq_s16", + "SIMD_ISA": "SVE2", + "name": "svabalb[_s32]", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x8_t c" + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" ], "return_type": { - "value": "int16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.H" }, - "c": { - "register": "Vm.8H" + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABA" + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabaq_s32", + "SIMD_ISA": "SVE2", + "name": "svabalb[_s64]", "arguments": [ - "int32x4_t a", - "int32x4_t 
b", - "int32x4_t c" + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" ], "return_type": { - "value": "int32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.S" }, - "c": { - "register": "Vm.4S" + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABA" + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabaq_s8", + "SIMD_ISA": "SVE2", + "name": "svabalb[_u16]", "arguments": [ - "int8x16_t a", - "int8x16_t b", - "int8x16_t c" + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" ], "return_type": { - "value": "int8x16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B" }, - "c": { - "register": "Vm.16B" + "op3": { + "register": "Zop3.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABA" + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabaq_u16", + "SIMD_ISA": "SVE2", + "name": "svabalb[_u32]", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16x8_t c" + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.H" }, - "c": { - "register": "Vm.8H" + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABA" + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabaq_u32", + "SIMD_ISA": "SVE2", + "name": "svabalb[_u64]", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" + 
"svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.S" }, - "c": { - "register": "Vm.4S" + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABA" + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabaq_u8", + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_s16]", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", - "uint8x16_t c" + "svint16_t op1", + "svint8_t op2", + "int8_t op3" ], "return_type": { - "value": "uint8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B" }, - "c": { - "register": "Vm.16B" + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABA" + "SABALT" + ], + [ + "MOVPRFX", + "SABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabd_f16", + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_s32]", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svint32_t op1", + "svint16_t op2", + "int16_t op3" ], "return_type": { - "value": "float16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FABD" + "SABALT" + ], + [ + "MOVPRFX", + "SABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabd_f32", + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_s64]", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svint64_t op1", + "svint32_t op2", + "int32_t op3" ], "return_type": { - "value": 
"float32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FABD" + "SABALT" + ], + [ + "MOVPRFX", + "SABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabd_f64", + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_u16]", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" ], "return_type": { - "value": "float64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ @@ -1218,200 +1335,244 @@ ], "instructions": [ [ - "FABD" + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabd_s16", + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_u32]", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" ], "return_type": { - "value": "int16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABD" + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabd_s32", + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_u64]", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" ], "return_type": { - "value": "int32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - 
"register": "Vm.2S" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABD" + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabd_s8", + "SIMD_ISA": "SVE2", + "name": "svabalt[_s16]", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" ], "return_type": { - "value": "int8x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABD" + "SABALT" + ], + [ + "MOVPRFX", + "SABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabd_u16", + "SIMD_ISA": "SVE2", + "name": "svabalt[_s32]", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" ], "return_type": { - "value": "uint16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABD" + "SABALT" + ], + [ + "MOVPRFX", + "SABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabd_u32", + "SIMD_ISA": "SVE2", + "name": "svabalt[_s64]", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" ], "return_type": { - "value": "uint32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABD" + 
"SABALT" + ], + [ + "MOVPRFX", + "SABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabd_u8", + "SIMD_ISA": "SVE2", + "name": "svabalt[_u16]", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABD" + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdd_f64", + "SIMD_ISA": "SVE2", + "name": "svabalt[_u32]", "arguments": [ - "float64_t a", - "float64_t b" + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" ], "return_type": { - "value": "float64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -1419,26 +1580,34 @@ ], "instructions": [ [ - "FABD" + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdh_f16", + "SIMD_ISA": "SVE2", + "name": "svabalt[_u64]", "arguments": [ - "float16_t a", - "float16_t b" + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" ], "return_type": { - "value": "float16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ -1446,26 +1615,34 @@ ], "instructions": [ [ - "FABD" + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_high_s16", + "SIMD_ISA": "SVE", + "name": "svabd[_f16]_m", "arguments": [ - "int16x8_t a", 
- "int16x8_t b" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -1473,26 +1650,34 @@ ], "instructions": [ [ - "SABDL2" + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_high_s32", + "SIMD_ISA": "SVE", + "name": "svabd[_f16]_x", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -1500,26 +1685,37 @@ ], "instructions": [ [ - "SABDL2" + "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_high_s8", + "SIMD_ISA": "SVE", + "name": "svabd[_f16]_z", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -1527,26 +1723,35 @@ ], "instructions": [ [ - "SABDL2" + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_high_u16", + "SIMD_ISA": "SVE", + "name": "svabd[_f32]_m", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": 
"svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -1554,26 +1759,34 @@ ], "instructions": [ [ - "UABDL2" + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_high_u32", + "SIMD_ISA": "SVE", + "name": "svabd[_f32]_x", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -1581,26 +1794,37 @@ ], "instructions": [ [ - "UABDL2" + "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_high_u8", + "SIMD_ISA": "SVE", + "name": "svabd[_f32]_z", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -1608,257 +1832,326 @@ ], "instructions": [ [ - "UABDL2" + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_s16", + "SIMD_ISA": "SVE", + "name": "svabd[_f64]_m", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - 
"register": "Vm.4H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABDL" + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_s32", + "SIMD_ISA": "SVE", + "name": "svabd[_f64]_x", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABDL" + "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_s8", + "SIMD_ISA": "SVE", + "name": "svabd[_f64]_z", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABDL" + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_u16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_f16]_m", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], 
"instructions": [ [ - "UABDL" + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_u32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_f16]_x", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABDL" + "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_u8", + "SIMD_ISA": "SVE", + "name": "svabd[_n_f16]_z", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABDL" + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdq_f16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_f32]_m", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdq_f32", + "SIMD_ISA": "SVE", + "name": 
"svabd[_n_f32]_x", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdq_f64", + "SIMD_ISA": "SVE", + "name": "svabd[_n_f32]_z", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -1866,200 +2159,253 @@ ], "instructions": [ [ + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdq_s16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_f64]_m", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABD" + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdq_s32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_f64]_x", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": 
"int32x4_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABD" + "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdq_s8", + "SIMD_ISA": "SVE", + "name": "svabd[_n_f64]_z", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABD" + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdq_u16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s16]_m", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABD" + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdq_u32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s16]_x", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { 
- "register": "Vm.4S" + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABD" + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdq_u8", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s16]_z", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABD" + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabds_f32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s32]_m", "arguments": [ - "float32_t a", - "float32_t b" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "float32_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Sm" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -2067,71 +2413,108 @@ ], "instructions": [ [ - "FABD" + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabs_f16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s32]_x", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FABS" + "SABD" + ], + [ + "SABD" + 
], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabs_f32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s32]_z", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FABS" + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabs_f64", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s64]_m", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "float64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -2139,72 +2522,108 @@ ], "instructions": [ [ - "FABS" + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabs_s16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s64]_x", "arguments": [ - "int16x4_t a" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ABS" + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabs_s32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s64]_z", "arguments": [ - "int32x2_t a" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svint64_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ABS" - ] - ] + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] }, { - "SIMD_ISA": "Neon", - "name": "vabs_s64", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s8]_m", "arguments": [ - "int64x1_t a" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -2212,47 +2631,72 @@ ], "instructions": [ [ - "ABS" + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabs_s8", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s8]_x", "arguments": [ - "int8x8_t a" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ABS" + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabsd_s64", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s8]_z", "arguments": [ - "int64_t a" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "int64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -2260,95 +2704,144 @@ ], "instructions": [ [ - "ABS" + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - 
"SIMD_ISA": "Neon", - "name": "vabsh_f16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u16]_m", "arguments": [ - "float16_t a" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FABS" + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabsq_f16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u16]_x", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FABS" + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabsq_f32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u16]_z", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FABS" + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabsq_f64", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u32]_m", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": 
"Vn.2D" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -2356,72 +2849,108 @@ ], "instructions": [ [ - "FABS" + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabsq_s16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u32]_x", "arguments": [ - "int16x8_t a" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ABS" + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabsq_s32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u32]_z", "arguments": [ - "int32x4_t a" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ABS" + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabsq_s64", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u64]_m", "arguments": [ - "int64x2_t a" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -2429,108 +2958,143 @@ ], "instructions": [ [ - "ABS" + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": 
"vabsq_s8", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u64]_x", "arguments": [ - "int8x16_t a" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ABS" + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_f16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u64]_z", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FADD" + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_f32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u8]_m", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FADD" + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_f64", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u8]_x", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": 
"float64x1_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -2538,345 +3102,435 @@ ], "instructions": [ [ - "FADD" + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_p16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u8]_z", "arguments": [ - "poly16x4_t a", - "poly16x4_t b" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "poly16x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_p64", + "SIMD_ISA": "SVE", + "name": "svabd[_s16]_m", "arguments": [ - "poly64x1_t a", - "poly64x1_t b" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "poly64x1_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_p8", + "SIMD_ISA": "SVE", + "name": "svabd[_s16]_x", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "poly8x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": 
"Vm.8B" + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_s16", + "SIMD_ISA": "SVE", + "name": "svabd[_s16]_z", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_s32", + "SIMD_ISA": "SVE", + "name": "svabd[_s32]_m", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_s64", + "SIMD_ISA": "SVE", + "name": "svabd[_s32]_x", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "SABD" + ], + [ + 
"SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_s8", + "SIMD_ISA": "SVE", + "name": "svabd[_s32]_z", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_u16", + "SIMD_ISA": "SVE", + "name": "svabd[_s64]_m", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_u32", + "SIMD_ISA": "SVE", + "name": "svabd[_s64]_x", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_u64", + "SIMD_ISA": "SVE", + "name": "svabd[_s64]_z", "arguments": [ - "uint64x1_t a", - 
"uint64x1_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_u8", + "SIMD_ISA": "SVE", + "name": "svabd[_s8]_m", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddd_s64", + "SIMD_ISA": "SVE", + "name": "svabd[_s8]_x", "arguments": [ - "int64_t a", - "int64_t b" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "int64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -2884,26 +3538,37 @@ ], "instructions": [ [ - "ADD" + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddd_u64", + "SIMD_ISA": "SVE", + "name": "svabd[_s8]_z", "arguments": [ - "uint64_t a", - "uint64_t b" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + 
"register": "Zop1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -2911,58 +3576,70 @@ ], "instructions": [ [ - "ADD" + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddh_f16", + "SIMD_ISA": "SVE", + "name": "svabd[_u16]_m", "arguments": [ - "float16_t a", - "float16_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "float16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FADD" + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_high_s16", + "SIMD_ISA": "SVE", + "name": "svabd[_u16]_x", "arguments": [ - "int8x8_t r", - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H|Ztied2.H" }, - "r": { - "register": "Vd.8B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -2970,30 +3647,37 @@ ], "instructions": [ [ - "ADDHN2" + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_high_s32", + "SIMD_ISA": "SVE", + "name": "svabd[_u16]_z", "arguments": [ - "int16x4_t r", - "int32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.H" }, - "r": { - 
"register": "Vd.4H" + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -3001,30 +3685,35 @@ ], "instructions": [ [ - "ADDHN2" + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_high_s64", + "SIMD_ISA": "SVE", + "name": "svabd[_u32]_m", "arguments": [ - "int32x2_t r", - "int64x2_t a", - "int64x2_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.S" }, - "r": { - "register": "Vd.2S" + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -3032,30 +3721,34 @@ ], "instructions": [ [ - "ADDHN2" + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_high_u16", + "SIMD_ISA": "SVE", + "name": "svabd[_u32]_x", "arguments": [ - "uint8x8_t r", - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S|Ztied2.S" }, - "r": { - "register": "Vd.8B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -3063,30 +3756,37 @@ ], "instructions": [ [ - "ADDHN2" + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_high_u32", + "SIMD_ISA": "SVE", + "name": "svabd[_u32]_z", "arguments": [ - "uint16x4_t r", - "uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S" }, - "r": 
{ - "register": "Vd.4H" + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -3094,30 +3794,35 @@ ], "instructions": [ [ - "ADDHN2" + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_high_u64", + "SIMD_ISA": "SVE", + "name": "svabd[_u64]_m", "arguments": [ - "uint32x2_t r", - "uint64x2_t a", - "uint64x2_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.D" }, - "r": { - "register": "Vd.2S" + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -3125,200 +3830,240 @@ ], "instructions": [ [ - "ADDHN2" + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_s16", + "SIMD_ISA": "SVE", + "name": "svabd[_u64]_x", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDHN" + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_s32", + "SIMD_ISA": "SVE", + "name": "svabd[_u64]_z", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" 
], "instructions": [ [ - "ADDHN" + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_s64", + "SIMD_ISA": "SVE", + "name": "svabd[_u8]_m", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDHN" + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_u16", + "SIMD_ISA": "SVE", + "name": "svabd[_u8]_x", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDHN" + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_u32", + "SIMD_ISA": "SVE", + "name": "svabd[_u8]_z", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDHN" + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_u64", + "SIMD_ISA": "SVE2", + 
"name": "svabdlb[_n_s16]", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDHN" + "SABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_high_s16", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_n_s32]", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -3326,26 +4071,26 @@ ], "instructions": [ [ - "SADDL2" + "SABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_high_s32", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_n_s64]", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -3353,26 +4098,26 @@ ], "instructions": [ [ - "SADDL2" + "SABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_high_s8", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_n_u16]", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ -3380,26 +4125,26 @@ ], "instructions": [ [ - "SADDL2" + "UABDLB" ] ] }, { 
- "SIMD_ISA": "Neon", - "name": "vaddl_high_u16", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_n_u32]", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -3407,26 +4152,26 @@ ], "instructions": [ [ - "UADDL2" + "UABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_high_u32", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_n_u64]", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -3434,26 +4179,26 @@ ], "instructions": [ [ - "UADDL2" + "UABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_high_u8", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_s16]", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -3461,196 +4206,188 @@ ], "instructions": [ [ - "UADDL2" + "SABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_s16", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_s32]", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ 
- "v7", - "A32", "A64" ], "instructions": [ [ - "SADDL" + "SABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_s32", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_s64]", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDL" + "SABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_s8", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_u16]", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDL" + "UABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_u16", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_u32]", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDL" + "UABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_u32", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_u64]", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + 
"register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDL" + "UABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_u8", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_s16]", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDL" + "SABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlv_s16", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_s32]", "arguments": [ - "int16x4_t a" + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "int32_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -3658,22 +4395,26 @@ ], "instructions": [ [ - "SADDLV" + "SABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlv_s32", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_s64]", "arguments": [ - "int32x2_t a" + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "int64_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -3681,22 +4422,26 @@ ], "instructions": [ [ - "SADDLP" + "SABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlv_s8", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_u16]", "arguments": [ - "int8x8_t a" + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "int16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ -3704,22 +4449,26 @@ ], "instructions": [ [ - 
"SADDLV" + "UABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlv_u16", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_u32]", "arguments": [ - "uint16x4_t a" + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -3727,22 +4476,26 @@ ], "instructions": [ [ - "UADDLV" + "UABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlv_u32", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_u64]", "arguments": [ - "uint32x2_t a" + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -3750,22 +4503,26 @@ ], "instructions": [ [ - "UADDLP" + "UABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlv_u8", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_s16]", "arguments": [ - "uint8x8_t a" + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -3773,22 +4530,26 @@ ], "instructions": [ [ - "UADDLV" + "SABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlvq_s16", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_s32]", "arguments": [ - "int16x8_t a" + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "int32_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -3796,22 +4557,26 @@ ], "instructions": [ [ - "SADDLV" + "SABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlvq_s32", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_s64]", 
"arguments": [ - "int32x4_t a" + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "int64_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -3819,22 +4584,26 @@ ], "instructions": [ [ - "SADDLV" + "SABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlvq_s8", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_u16]", "arguments": [ - "int8x16_t a" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "int16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -3842,22 +4611,26 @@ ], "instructions": [ [ - "SADDLV" + "UABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlvq_u16", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_u32]", "arguments": [ - "uint16x8_t a" + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -3865,22 +4638,26 @@ ], "instructions": [ [ - "UADDLV" + "UABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlvq_u32", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_u64]", "arguments": [ - "uint32x4_t a" + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -3888,22 +4665,30 @@ ], "instructions": [ [ - "UADDLV" + "UABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlvq_u8", + "SIMD_ISA": "SVE", + "name": "svabs[_f16]_m", "arguments": [ - "uint8x16_t a" + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" ], "return_type": { - "value": 
"uint16_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -3911,83 +4696,93 @@ ], "instructions": [ [ - "UADDLV" + "FABS" + ], + [ + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_f16", + "SIMD_ISA": "SVE", + "name": "svabs[_f16]_x", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svfloat16_t op" ], "return_type": { - "value": "float16x8_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op": { + "register": "Zop.H|Ztied.H" }, - "b": { - "register": "Vm.8H" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FADD" + "FABS" + ], + [ + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_f32", + "SIMD_ISA": "SVE", + "name": "svabs[_f16]_z", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svfloat16_t op" ], "return_type": { - "value": "float32x4_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op": { + "register": "Zop.H" }, - "b": { - "register": "Vm.4S" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FADD" + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_f64", + "SIMD_ISA": "SVE", + "name": "svabs[_f32]_m", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" ], "return_type": { - "value": "float64x2_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "inactive": { + "register": "Zinactive.S|Ztied.S" }, - "b": { - "register": "Vm.2D" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -3995,370 +4790,406 @@ ], "instructions": [ [ - "FADD" + "FABS" + ], + 
[ + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_p128", + "SIMD_ISA": "SVE", + "name": "svabs[_f32]_x", "arguments": [ - "poly128_t a", - "poly128_t b" + "svbool_t pg", + "svfloat32_t op" ], "return_type": { - "value": "poly128_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.S|Ztied.S" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "FABS" + ], + [ + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_p16", + "SIMD_ISA": "SVE", + "name": "svabs[_f32]_z", "arguments": [ - "poly16x8_t a", - "poly16x8_t b" + "svbool_t pg", + "svfloat32_t op" ], "return_type": { - "value": "poly16x8_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.S" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_p64", + "SIMD_ISA": "SVE", + "name": "svabs[_f64]_m", "arguments": [ - "poly64x2_t a", - "poly64x2_t b" + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" ], "return_type": { - "value": "poly64x2_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "inactive": { + "register": "Zinactive.D|Ztied.D" }, - "b": { - "register": "Vm.16B" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "FABS" + ], + [ + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_p8", + "SIMD_ISA": "SVE", + "name": "svabs[_f64]_x", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "svbool_t pg", + "svfloat64_t op" ], "return_type": { - "value": "poly8x16_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - 
"register": "Vn.16B" + "op": { + "register": "Zop.D|Ztied.D" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "FABS" + ], + [ + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_s16", + "SIMD_ISA": "SVE", + "name": "svabs[_f64]_z", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svfloat64_t op" ], "return_type": { - "value": "int16x8_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op": { + "register": "Zop.D" }, - "b": { - "register": "Vm.8H" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_s32", + "SIMD_ISA": "SVE", + "name": "svabs[_s16]_m", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "int32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "inactive": { + "register": "Zinactive.H|Ztied.H" }, - "b": { - "register": "Vm.4S" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "ABS" + ], + [ + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_s64", + "SIMD_ISA": "SVE", + "name": "svabs[_s16]_x", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "int64x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op": { + "register": "Zop.H|Ztied.H" }, - "b": { - "register": "Vm.2D" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "ABS" + ], + [ + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_s8", + "SIMD_ISA": "SVE", + "name": "svabs[_s16]_z", 
"arguments": [ - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "int8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.H" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_u16", + "SIMD_ISA": "SVE", + "name": "svabs[_s32]_m", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "inactive": { + "register": "Zinactive.S|Ztied.S" }, - "b": { - "register": "Vm.8H" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "ABS" + ], + [ + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_u32", + "SIMD_ISA": "SVE", + "name": "svabs[_s32]_x", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op": { + "register": "Zop.S|Ztied.S" }, - "b": { - "register": "Vm.4S" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "ABS" + ], + [ + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_u64", + "SIMD_ISA": "SVE", + "name": "svabs[_s32]_z", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint64x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op": { + "register": "Zop.S" }, - "b": { - "register": "Vm.2D" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - 
"A32", "A64" ], "instructions": [ [ - "ADD" + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_u8", + "SIMD_ISA": "SVE", + "name": "svabs[_s64]_m", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "uint8x16_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "inactive": { + "register": "Zinactive.D|Ztied.D" }, - "b": { - "register": "Vm.16B" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "ABS" + ], + [ + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddv_f32", + "SIMD_ISA": "SVE", + "name": "svabs[_s64]_x", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "float32_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -4366,22 +5197,30 @@ ], "instructions": [ [ - "FADDP" + "ABS" + ], + [ + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddv_s16", + "SIMD_ISA": "SVE", + "name": "svabs[_s64]_z", "arguments": [ - "int16x4_t a" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "int16_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -4389,22 +5228,31 @@ ], "instructions": [ [ - "ADDV" + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddv_s32", + "SIMD_ISA": "SVE", + "name": "svabs[_s8]_m", "arguments": [ - "int32x2_t a" + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "int32_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + 
"op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -4412,22 +5260,30 @@ ], "instructions": [ [ - "ADDP" + "ABS" + ], + [ + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddv_s8", + "SIMD_ISA": "SVE", + "name": "svabs[_s8]_x", "arguments": [ - "int8x8_t a" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "int8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -4435,22 +5291,30 @@ ], "instructions": [ [ - "ADDV" + "ABS" + ], + [ + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddv_u16", + "SIMD_ISA": "SVE", + "name": "svabs[_s8]_z", "arguments": [ - "uint16x4_t a" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "uint16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -4458,22 +5322,31 @@ ], "instructions": [ [ - "ADDV" + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddv_u32", + "SIMD_ISA": "SVE", + "name": "svacge[_f16]", "arguments": [ - "uint32x2_t a" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -4481,22 +5354,30 @@ ], "instructions": [ [ - "ADDP" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddv_u8", + "SIMD_ISA": "SVE", + "name": "svacge[_f32]", "arguments": [ - "uint8x8_t a" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "uint8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S" + }, + "op2": 
{ + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -4504,22 +5385,30 @@ ], "instructions": [ [ - "ADDV" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_f32", + "SIMD_ISA": "SVE", + "name": "svacge[_f64]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "float32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -4527,23 +5416,30 @@ ], "instructions": [ [ - "FADDP", - "FADDP" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_f64", + "SIMD_ISA": "SVE", + "name": "svacge[_n_f16]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "float64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -4551,22 +5447,30 @@ ], "instructions": [ [ - "FADDP" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_s16", + "SIMD_ISA": "SVE", + "name": "svacge[_n_f32]", "arguments": [ - "int16x8_t a" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "int16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -4574,22 +5478,30 @@ ], "instructions": [ [ - "ADDV" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_s32", + "SIMD_ISA": "SVE", + "name": "svacge[_n_f64]", "arguments": [ - "int32x4_t a" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -4597,22 +5509,30 @@ ], "instructions": [ [ - "ADDV" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_s64", + "SIMD_ISA": "SVE", + "name": "svacgt[_f16]", "arguments": [ - "int64x2_t a" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -4620,22 +5540,30 @@ ], "instructions": [ [ - "ADDP" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_s8", + "SIMD_ISA": "SVE", + "name": "svacgt[_f32]", "arguments": [ - "int8x16_t a" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "int8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -4643,22 +5571,30 @@ ], "instructions": [ [ - "ADDV" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_u16", + "SIMD_ISA": "SVE", + "name": "svacgt[_f64]", "arguments": [ - "uint16x8_t a" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -4666,22 +5602,30 @@ ], "instructions": [ [ - "ADDV" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_u32", + "SIMD_ISA": "SVE", + "name": "svacgt[_n_f16]", "arguments": [ - "uint32x4_t a" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], 
"return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -4689,22 +5633,30 @@ ], "instructions": [ [ - "ADDV" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_u64", + "SIMD_ISA": "SVE", + "name": "svacgt[_n_f32]", "arguments": [ - "uint64x2_t a" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -4712,22 +5664,30 @@ ], "instructions": [ [ - "ADDP" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_u8", + "SIMD_ISA": "SVE", + "name": "svacgt[_n_f64]", "arguments": [ - "uint8x16_t a" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "uint8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -4735,26 +5695,30 @@ ], "instructions": [ [ - "ADDV" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_high_s16", + "SIMD_ISA": "SVE", + "name": "svacle[_f16]", "arguments": [ - "int32x4_t a", - "int16x8_t b" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -4762,26 +5726,30 @@ ], "instructions": [ [ - "SADDW2" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_high_s32", + 
"SIMD_ISA": "SVE", + "name": "svacle[_f32]", "arguments": [ - "int64x2_t a", - "int32x4_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -4789,26 +5757,30 @@ ], "instructions": [ [ - "SADDW2" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_high_s8", + "SIMD_ISA": "SVE", + "name": "svacle[_f64]", "arguments": [ - "int16x8_t a", - "int8x16_t b" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -4816,26 +5788,30 @@ ], "instructions": [ [ - "SADDW2" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_high_u16", + "SIMD_ISA": "SVE", + "name": "svacle[_n_f16]", "arguments": [ - "uint32x4_t a", - "uint16x8_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -4843,26 +5819,30 @@ ], "instructions": [ [ - "UADDW2" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_high_u32", + "SIMD_ISA": "SVE", + "name": "svacle[_n_f32]", "arguments": [ - "uint64x2_t a", - "uint32x4_t b" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": 
{ + "register": "Zop1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -4870,26 +5850,30 @@ ], "instructions": [ [ - "UADDW2" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_high_u8", + "SIMD_ISA": "SVE", + "name": "svacle[_n_f64]", "arguments": [ - "uint16x8_t a", - "uint8x16_t b" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -4897,1518 +5881,1906 @@ ], "instructions": [ [ - "UADDW2" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_s16", + "SIMD_ISA": "SVE", + "name": "svaclt[_f16]", "arguments": [ - "int32x4_t a", - "int16x4_t b" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDW" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_s32", + "SIMD_ISA": "SVE", + "name": "svaclt[_f32]", "arguments": [ - "int64x2_t a", - "int32x2_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDW" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_s8", + "SIMD_ISA": "SVE", + 
"name": "svaclt[_f64]", "arguments": [ - "int16x8_t a", - "int8x8_t b" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDW" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_u16", + "SIMD_ISA": "SVE", + "name": "svaclt[_n_f16]", "arguments": [ - "uint32x4_t a", - "uint16x4_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDW" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_u32", + "SIMD_ISA": "SVE", + "name": "svaclt[_n_f32]", "arguments": [ - "uint64x2_t a", - "uint32x2_t b" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDW" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_u8", + "SIMD_ISA": "SVE", + "name": "svaclt[_n_f64]", "arguments": [ - "uint16x8_t a", - "uint8x8_t b" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" }, - "b": { - 
"register": "Vm.8B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDW" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaesdq_u8", + "SIMD_ISA": "SVE2", + "name": "svadalp[_s16]_m", "arguments": [ - "uint8x16_t data", - "uint8x16_t key" + "svbool_t pg", + "svint16_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "data": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "key": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "AESD" + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaeseq_u8", + "SIMD_ISA": "SVE2", + "name": "svadalp[_s16]_x", "arguments": [ - "uint8x16_t data", - "uint8x16_t key" + "svbool_t pg", + "svint16_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "data": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "key": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "AESE" + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaesimcq_u8", + "SIMD_ISA": "SVE2", + "name": "svadalp[_s16]_z", "arguments": [ - "uint8x16_t data" + "svbool_t pg", + "svint16_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "data": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "AESIMC" + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaesmcq_u8", + 
"SIMD_ISA": "SVE2", + "name": "svadalp[_s32]_m", "arguments": [ - "uint8x16_t data" + "svbool_t pg", + "svint32_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "data": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "AESMC" + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vand_s16", + "SIMD_ISA": "SVE2", + "name": "svadalp[_s32]_x", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svbool_t pg", + "svint32_t op1", + "svint16_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vand_s32", + "SIMD_ISA": "SVE2", + "name": "svadalp[_s32]_z", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svbool_t pg", + "svint32_t op1", + "svint16_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vand_s64", + "SIMD_ISA": "SVE2", + "name": "svadalp[_s64]_m", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "svbool_t pg", + "svint64_t op1", + "svint32_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - 
"register": "Dn" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vand_s8", + "SIMD_ISA": "SVE2", + "name": "svadalp[_s64]_x", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + "svint64_t op1", + "svint32_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vand_u16", + "SIMD_ISA": "SVE2", + "name": "svadalp[_s64]_z", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svbool_t pg", + "svint64_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vand_u32", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u16]_m", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], 
"instructions": [ [ - "AND" + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vand_u64", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u16]_x", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vand_u8", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u16]_z", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "MOVPRFX", + "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vandq_s16", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u32]_m", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint16_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vandq_s32", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u32]_x", "arguments": [ - "int32x4_t a", - 
"int32x4_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vandq_s64", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u32]_z", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint16_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "MOVPRFX", + "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vandq_s8", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u64]_m", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint32_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vandq_u16", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u64]_x", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + 
"op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vandq_u32", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u64]_z", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "MOVPRFX", + "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vandq_u64", + "SIMD_ISA": "SVE2", + "name": "svadclb[_n_u32]", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "ADCLB" + ], + [ + "MOVPRFX", + "ADCLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vandq_u8", + "SIMD_ISA": "SVE2", + "name": "svadclb[_n_u64]", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ - "v7", - 
"A32", "A64" ], "instructions": [ [ - "AND" + "ADCLB" + ], + [ + "MOVPRFX", + "ADCLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbcaxq_s16", + "SIMD_ISA": "SVE2", + "name": "svadclb[_u32]", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x8_t c" + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" ], "return_type": { - "value": "int16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" }, - "b": {}, - "c": {} + "op3": { + "register": "Zop3.S" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "BCAX" + "ADCLB" + ], + [ + "MOVPRFX", + "ADCLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbcaxq_s32", + "SIMD_ISA": "SVE2", + "name": "svadclb[_u64]", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "int32x4_t c" + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" ], "return_type": { - "value": "int32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" }, - "b": {}, - "c": {} + "op3": { + "register": "Zop3.D" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "BCAX" + "ADCLB" + ], + [ + "MOVPRFX", + "ADCLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbcaxq_s64", + "SIMD_ISA": "SVE2", + "name": "svadclt[_n_u32]", "arguments": [ - "int64x2_t a", - "int64x2_t b", - "int64x2_t c" + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" ], "return_type": { - "value": "int64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" }, - "b": {}, - "c": {} + "op3": { + "register": "Zop3.S[*]" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "BCAX" + "ADCLT" + ], + [ + "MOVPRFX", + "ADCLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbcaxq_s8", + "SIMD_ISA": "SVE2", + "name": 
"svadclt[_n_u64]", "arguments": [ - "int8x16_t a", - "int8x16_t b", - "int8x16_t c" + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" ], "return_type": { - "value": "int8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" }, - "b": {}, - "c": {} + "op3": { + "register": "Zop3.D[*]" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "BCAX" + "ADCLT" + ], + [ + "MOVPRFX", + "ADCLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbcaxq_u16", + "SIMD_ISA": "SVE2", + "name": "svadclt[_u32]", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16x8_t c" + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" }, - "b": {}, - "c": {} + "op3": { + "register": "Zop3.S" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "BCAX" + "ADCLT" + ], + [ + "MOVPRFX", + "ADCLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbcaxq_u32", + "SIMD_ISA": "SVE2", + "name": "svadclt[_u64]", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" }, - "b": {}, - "c": {} + "op3": { + "register": "Zop3.D" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "BCAX" + "ADCLT" + ], + [ + "MOVPRFX", + "ADCLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbcaxq_u64", + "SIMD_ISA": "SVE", + "name": "svadd[_f16]_m", "arguments": [ - "uint64x2_t a", - "uint64x2_t b", - "uint64x2_t c" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": 
"uint64x2_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.H" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "BCAX" + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbcaxq_u8", + "SIMD_ISA": "SVE", + "name": "svadd[_f16]_x", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", - "uint8x16_t c" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.H" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "BCAX" + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbic_s16", + "SIMD_ISA": "SVE", + "name": "svadd[_f16]_z", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbic_s32", + "SIMD_ISA": "SVE", + "name": "svadd[_f32]_m", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": 
"Vm.8B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbic_s64", + "SIMD_ISA": "SVE", + "name": "svadd[_f32]_x", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbic_s8", + "SIMD_ISA": "SVE", + "name": "svadd[_f32]_z", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbic_u16", + "SIMD_ISA": "SVE", + "name": "svadd[_f64]_m", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ 
- "BIC" + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbic_u32", + "SIMD_ISA": "SVE", + "name": "svadd[_f64]_x", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbic_u64", + "SIMD_ISA": "SVE", + "name": "svadd[_f64]_z", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbic_u8", + "SIMD_ISA": "SVE", + "name": "svadd[_n_f16]_m", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Ztied1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbicq_s16", + "SIMD_ISA": "SVE", + "name": 
"svadd[_n_f16]_x", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbicq_s32", + "SIMD_ISA": "SVE", + "name": "svadd[_n_f16]_z", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbicq_s64", + "SIMD_ISA": "SVE", + "name": "svadd[_n_f32]_m", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Ztied1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbicq_s8", + "SIMD_ISA": "SVE", + "name": "svadd[_n_f32]_x", "arguments": 
[ - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbicq_u16", + "SIMD_ISA": "SVE", + "name": "svadd[_n_f32]_z", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vbicq_u32", + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_f64]_m", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Ztied1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbicq_u64", + "SIMD_ISA": "SVE", + "name": "svadd[_n_f64]_x", "arguments": 
[ - "uint64x2_t a", - "uint64x2_t b" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbicq_u8", + "SIMD_ISA": "SVE", + "name": "svadd[_n_f64]_z", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s16]_m", "arguments": [ - "uint16x4_t a", - "float16x4_t b", - "float16x4_t c" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.H[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_f32", + "SIMD_ISA": "SVE", + "name": 
"svadd[_n_s16]_x", "arguments": [ - "uint32x2_t a", - "float32x2_t b", - "float32x2_t c" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_f64", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s16]_z", "arguments": [ - "uint64x1_t a", - "float64x1_t b", - "float64x1_t c" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "float64x1_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.H[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -6416,458 +7788,526 @@ ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_p16", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s32]_m", "arguments": [ - "uint16x4_t a", - "poly16x4_t b", - "poly16x4_t c" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "poly16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.S[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_p64", + "SIMD_ISA": "SVE", + 
"name": "svadd[_n_s32]_x", "arguments": [ - "poly64x1_t a", - "poly64x1_t b", - "poly64x1_t c" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "poly64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_p8", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s32]_z", "arguments": [ - "uint8x8_t a", - "poly8x8_t b", - "poly8x8_t c" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "poly8x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.S[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_s16", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s64]_m", "arguments": [ - "uint16x4_t a", - "int16x4_t b", - "int16x4_t c" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_s32", + "SIMD_ISA": "SVE", + "name": 
"svadd[_n_s64]_x", "arguments": [ - "uint32x2_t a", - "int32x2_t b", - "int32x2_t c" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_s64", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s64]_z", "arguments": [ - "uint64x1_t a", - "int64x1_t b", - "int64x1_t c" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_s8", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s8]_m", "arguments": [ - "uint8x8_t a", - "int8x8_t b", - "int8x8_t c" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.B[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_u16", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s8]_x", 
"arguments": [ - "uint16x4_t a", - "uint16x4_t b", - "uint16x4_t c" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_u32", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s8]_z", "arguments": [ - "uint32x2_t a", - "uint32x2_t b", - "uint32x2_t c" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.B[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_u64", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u16]_m", "arguments": [ - "uint64x1_t a", - "uint64x1_t b", - "uint64x1_t c" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.H[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_u8", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u16]_x", "arguments": 
[ - "uint8x8_t a", - "uint8x8_t b", - "uint8x8_t c" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u16]_z", "arguments": [ - "uint16x8_t a", - "float16x8_t b", - "float16x8_t c" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.H[*]" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_f32", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u32]_m", "arguments": [ - "uint32x4_t a", - "float32x4_t b", - "float32x4_t c" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.S[*]" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_f64", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u32]_x", 
"arguments": [ - "uint64x2_t a", - "float64x2_t b", - "float64x2_t c" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -6875,556 +8315,665 @@ ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_p16", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u32]_z", "arguments": [ - "uint16x8_t a", - "poly16x8_t b", - "poly16x8_t c" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "poly16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.S[*]" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_p64", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u64]_m", "arguments": [ - "poly64x2_t a", - "poly64x2_t b", - "poly64x2_t c" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "poly64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.D[*]" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_p8", + "SIMD_ISA": "SVE", + "name": 
"svadd[_n_u64]_x", "arguments": [ - "uint8x16_t a", - "poly8x16_t b", - "poly8x16_t c" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "poly8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_s16", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u64]_z", "arguments": [ - "uint16x8_t a", - "int16x8_t b", - "int16x8_t c" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.D[*]" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_s32", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u8]_m", "arguments": [ - "uint32x4_t a", - "int32x4_t b", - "int32x4_t c" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B[*]" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_s64", + "SIMD_ISA": "SVE", + 
"name": "svadd[_n_u8]_x", "arguments": [ - "uint64x2_t a", - "int64x2_t b", - "int64x2_t c" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_s8", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u8]_z", "arguments": [ - "uint8x16_t a", - "int8x16_t b", - "int8x16_t c" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B[*]" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_u16", + "SIMD_ISA": "SVE", + "name": "svadd[_s16]_m", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16x8_t c" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.H" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_u32", + "SIMD_ISA": "SVE", + "name": 
"svadd[_s16]_x", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.H|Ztied2.H" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_u64", + "SIMD_ISA": "SVE", + "name": "svadd[_s16]_z", "arguments": [ - "uint64x2_t a", - "uint64x2_t b", - "uint64x2_t c" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.H" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_u8", + "SIMD_ISA": "SVE", + "name": "svadd[_s32]_m", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", - "uint8x16_t c" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.S" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcadd_rot270_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_s32]_x", "arguments": [ - 
"float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H " + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCADD" + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcadd_rot270_f32", + "SIMD_ISA": "SVE", + "name": "svadd[_s32]_z", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S " + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCADD" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcadd_rot90_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_s64]_m", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H " + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCADD" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcadd_rot90_f32", + "SIMD_ISA": "SVE", + "name": "svadd[_s64]_x", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint64_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.2S " + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCADD" + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaddq_rot270_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_s64]_z", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H " + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCADD" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaddq_rot270_f32", + "SIMD_ISA": "SVE", + "name": "svadd[_s8]_m", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S " + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCADD" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaddq_rot270_f64", + "SIMD_ISA": "SVE", + "name": "svadd[_s8]_x", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D " + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B|Ztied2.B" + 
}, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -7432,82 +8981,107 @@ ], "instructions": [ [ - "FCADD" + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaddq_rot90_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_s8]_z", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H " + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCADD" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaddq_rot90_f32", + "SIMD_ISA": "SVE", + "name": "svadd[_u16]_m", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S " + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCADD" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaddq_rot90_f64", + "SIMD_ISA": "SVE", + "name": "svadd[_u16]_x", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D " + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -7515,83 +9089,107 @@ ], "instructions": [ [ - "FCADD" + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, 
{ - "SIMD_ISA": "Neon", - "name": "vcage_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_u16]_z", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FACGE" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcage_f32", + "SIMD_ISA": "SVE", + "name": "svadd[_u32]_m", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FACGE" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcage_f64", + "SIMD_ISA": "SVE", + "name": "svadd[_u32]_x", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -7599,26 +9197,36 @@ ], "instructions": [ [ - "FACGE" + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaged_f64", + "SIMD_ISA": "SVE", + "name": "svadd[_u32]_z", "arguments": [ - "float64_t a", - "float64_t b" + "svbool_t pg", + "svuint32_t op1", + 
"svuint32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -7626,26 +9234,35 @@ ], "instructions": [ [ - "FACGE" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcageh_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_u64]_m", "arguments": [ - "float16_t a", - "float16_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -7653,83 +9270,107 @@ ], "instructions": [ [ - "FACGE" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcageq_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_u64]_x", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FACGE" + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcageq_f32", + "SIMD_ISA": "SVE", + "name": "svadd[_u64]_z", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" }, - "b": { 
- "register": "Vm.4S" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FACGE" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcageq_f64", + "SIMD_ISA": "SVE", + "name": "svadd[_u8]_m", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -7737,26 +9378,34 @@ ], "instructions": [ [ - "FACGE" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcages_f32", + "SIMD_ISA": "SVE", + "name": "svadd[_u8]_x", "arguments": [ - "float32_t a", - "float32_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Sm" + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -7764,83 +9413,103 @@ ], "instructions": [ [ - "FACGE" + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagt_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_u8]_z", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FACGT" + "MOVPRFX", + "ADD" + ], 
+ [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagt_f32", + "SIMD_ISA": "SVE", + "name": "svadda[_f16]", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "float16_t initial", + "svfloat16_t op" ], "return_type": { - "value": "uint32x2_t" + "value": "float16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "initial": { + "register": "Htied" }, - "b": { - "register": "Vm.2S" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FACGT" + "FADDA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagt_f64", + "SIMD_ISA": "SVE", + "name": "svadda[_f32]", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svbool_t pg", + "float32_t initial", + "svfloat32_t op" ], "return_type": { - "value": "uint64x1_t" + "value": "float32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "initial": { + "register": "Stied" }, - "b": { - "register": "Dm" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -7848,26 +9517,30 @@ ], "instructions": [ [ - "FACGT" + "FADDA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagtd_f64", + "SIMD_ISA": "SVE", + "name": "svadda[_f64]", "arguments": [ - "float64_t a", - "float64_t b" + "svbool_t pg", + "float64_t initial", + "svfloat64_t op" ], "return_type": { - "value": "uint64_t" + "value": "float64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "initial": { + "register": "Dtied" }, - "b": { - "register": "Dm" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -7875,26 +9548,26 @@ ], "instructions": [ [ - "FACGT" + "FADDA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagth_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_s16]", "arguments": [ - "float16_t a", - "float16_t b" + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svint8_t" }, "Arguments_Preparation": 
{ - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -7902,83 +9575,80 @@ ], "instructions": [ [ - "FACGT" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagtq_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_s32]", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FACGT" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagtq_f32", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_s64]", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.D[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FACGT" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagtq_f64", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_u16]", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -7986,26 +9656,26 @@ ], "instructions": [ [ - "FACGT" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagts_f32", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_u32]", "arguments": [ - "float32_t a", - "float32_t b" + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": 
"uint32_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Sm" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -8013,83 +9683,80 @@ ], "instructions": [ [ - "FACGT" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcale_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_u64]", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.D[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FACGE" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcale_f32", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_s16]", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FACGE" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcale_f64", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_s32]", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -8097,26 +9764,26 @@ ], "instructions": [ [ - "FACGE" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaled_f64", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_s64]", "arguments": [ - "float64_t a", - "float64_t b" + "svint64_t op1", + 
"svint64_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.D" } }, "Architectures": [ @@ -8124,26 +9791,26 @@ ], "instructions": [ [ - "FACGE" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaleh_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_u16]", "arguments": [ - "float16_t a", - "float16_t b" + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -8151,83 +9818,84 @@ ], "instructions": [ [ - "FACGE" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaleq_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_u32]", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FACGE" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaleq_f32", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_u64]", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FACGE" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaleq_f64", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_s16]", "arguments": [ - 
"float64x2_t a", - "float64x2_t b" + "svint8_t even", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "even": { + "register": "Ztied.B" }, - "b": { - "register": "Vm.2D" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -8235,26 +9903,30 @@ ], "instructions": [ [ - "FACGE" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcales_f32", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_s32]", "arguments": [ - "float32_t a", - "float32_t b" + "svint16_t even", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "even": { + "register": "Ztied.H" }, - "b": { - "register": "Sm" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -8262,83 +9934,92 @@ ], "instructions": [ [ - "FACGE" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcalt_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_s64]", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svint32_t even", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "even": { + "register": "Ztied.S" }, - "b": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcalt_f32", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_u16]", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svuint8_t even", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "even": { + "register": "Ztied.B" }, - "b": { - "register": 
"Vm.2S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcalt_f64", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_u32]", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svuint16_t even", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "even": { + "register": "Ztied.H" }, - "b": { - "register": "Dm" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -8346,26 +10027,30 @@ ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaltd_f64", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_u64]", "arguments": [ - "float64_t a", - "float64_t b" + "svuint32_t even", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "even": { + "register": "Ztied.S" }, - "b": { - "register": "Dm" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" } }, "Architectures": [ @@ -8373,26 +10058,30 @@ ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcalth_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_s16]", "arguments": [ - "float16_t a", - "float16_t b" + "svint8_t even", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "even": { + "register": "Ztied.B" }, - "b": { - "register": "Hm" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -8400,83 +10089,92 @@ ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaltq_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_s32]", 
"arguments": [ - "float16x8_t a", - "float16x8_t b" + "svint16_t even", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "even": { + "register": "Ztied.H" }, - "b": { - "register": "Vm.8H" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaltq_f32", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_s64]", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svint32_t even", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "even": { + "register": "Ztied.S" }, - "b": { - "register": "Vm.4S" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaltq_f64", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_u16]", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svuint8_t even", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "even": { + "register": "Ztied.B" }, - "b": { - "register": "Vm.2D" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -8484,26 +10182,30 @@ ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcalts_f32", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_u32]", "arguments": [ - "float32_t a", - "float32_t b" + "svuint16_t even", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "even": { + "register": "Ztied.H" }, - 
"b": { - "register": "Sm" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -8511,83 +10213,84 @@ ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_u64]", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svuint32_t even", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "even": { + "register": "Ztied.S" }, - "b": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMEQ" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_f32", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_s16]", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMEQ" + "SADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_f64", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_s32]", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -8595,141 +10298,134 @@ ], "instructions": [ [ - "FCMEQ" + "SADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_p64", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_s64]", "arguments": [ - "poly64x1_t a", - "poly64x1_t b" + "svint32_t op1", + "int32_t op2" ], "return_type": { - 
"value": "uint64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "SADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_p8", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_u16]", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "UADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_s16", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_u32]", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "UADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_s32", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_u64]", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "UADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_s64", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_s16]", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "svint8_t op1", + 
"svint8_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -8737,113 +10433,107 @@ ], "instructions": [ [ - "CMEQ" + "SADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_s8", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_s32]", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "SADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_u16", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_s64]", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "SADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_u32", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_u16]", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "UADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_u64", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_u32]", "arguments": [ - "uint64x1_t a", - 
"uint64x1_t b" + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -8851,55 +10541,53 @@ ], "instructions": [ [ - "CMEQ" + "UADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_u8", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_u64]", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "UADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqd_f64", + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_n_s16]", "arguments": [ - "float64_t a", - "float64_t b" + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ -8907,26 +10595,26 @@ ], "instructions": [ [ - "FCMEQ" + "SADDLBT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqd_s64", + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_n_s32]", "arguments": [ - "int64_t a", - "int64_t b" + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -8934,26 +10622,26 @@ ], "instructions": [ [ - "CMEQ" + "SADDLBT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqd_u64", + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_n_s64]", 
"arguments": [ - "uint64_t a", - "uint64_t b" + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -8961,26 +10649,26 @@ ], "instructions": [ [ - "CMEQ" + "SADDLBT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqh_f16", + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_s16]", "arguments": [ - "float16_t a", - "float16_t b" + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -8988,83 +10676,80 @@ ], "instructions": [ [ - "FCMEQ" + "SADDLBT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_f16", + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_s32]", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMEQ" + "SADDLBT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_f32", + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_s64]", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMEQ" + "SADDLBT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_f64", + "SIMD_ISA": "SVE2", 
+ "name": "svaddlt[_n_s16]", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ -9072,141 +10757,134 @@ ], "instructions": [ [ - "FCMEQ" + "SADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_p64", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_n_s32]", "arguments": [ - "poly64x2_t a", - "poly64x2_t b" + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "SADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_p8", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_n_s64]", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "SADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_s16", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_n_u16]", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "UADDLT" ] ] }, { - "SIMD_ISA": 
"Neon", - "name": "vceqq_s32", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_n_u32]", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "UADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_s64", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_n_u64]", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -9214,113 +10892,107 @@ ], "instructions": [ [ - "CMEQ" + "UADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_s8", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_s16]", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "SADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_u16", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_s32]", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], 
"instructions": [ [ - "CMEQ" + "SADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_u32", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_s64]", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "SADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_u64", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_u16]", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -9328,55 +11000,53 @@ ], "instructions": [ [ - "CMEQ" + "UADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_u8", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_u32]", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "UADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqs_f32", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_u64]", "arguments": [ - "float32_t a", - "float32_t b" + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Sm" + "op2": { + "register": "Zop2.S" } }, 
"Architectures": [ @@ -9384,46 +11054,65 @@ ], "instructions": [ [ - "FCMEQ" + "UADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_f16", + "SIMD_ISA": "SVE2", + "name": "svaddp[_f16]_m", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMEQ" + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_f32", + "SIMD_ISA": "SVE2", + "name": "svaddp[_f16]_x", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -9431,22 +11120,34 @@ ], "instructions": [ [ - "FCMEQ" + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_f64", + "SIMD_ISA": "SVE2", + "name": "svaddp[_f32]_m", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -9454,46 +11155,69 @@ ], "instructions": [ [ - "FCMEQ" + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_p64", + "SIMD_ISA": "SVE2", + "name": "svaddp[_f32]_x", "arguments": [ - "poly64x1_t a" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - 
"value": "uint64x1_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_p8", + "SIMD_ISA": "SVE2", + "name": "svaddp[_f64]_m", "arguments": [ - "poly8x8_t a" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -9501,22 +11225,34 @@ ], "instructions": [ [ - "CMEQ" + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_s16", + "SIMD_ISA": "SVE2", + "name": "svaddp[_f64]_x", "arguments": [ - "int16x4_t a" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -9524,22 +11260,34 @@ ], "instructions": [ [ - "CMEQ" + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_s32", + "SIMD_ISA": "SVE2", + "name": "svaddp[_s16]_m", "arguments": [ - "int32x2_t a" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -9547,22 +11295,34 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + 
"MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_s64", + "SIMD_ISA": "SVE2", + "name": "svaddp[_s16]_x", "arguments": [ - "int64x1_t a" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -9570,22 +11330,34 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_s8", + "SIMD_ISA": "SVE2", + "name": "svaddp[_s32]_m", "arguments": [ - "int8x8_t a" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -9593,22 +11365,34 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_u16", + "SIMD_ISA": "SVE2", + "name": "svaddp[_s32]_x", "arguments": [ - "uint16x4_t a" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -9616,22 +11400,34 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_u32", + "SIMD_ISA": "SVE2", + "name": "svaddp[_s64]_m", "arguments": [ - "uint32x2_t a" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": 
"Vn.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -9639,22 +11435,34 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_u64", + "SIMD_ISA": "SVE2", + "name": "svaddp[_s64]_x", "arguments": [ - "uint64x1_t a" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -9662,22 +11470,34 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_u8", + "SIMD_ISA": "SVE2", + "name": "svaddp[_s8]_m", "arguments": [ - "uint8x8_t a" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -9685,22 +11505,34 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzd_f64", + "SIMD_ISA": "SVE2", + "name": "svaddp[_s8]_x", "arguments": [ - "float64_t a" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -9708,22 +11540,34 @@ ], "instructions": [ [ - "FCMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzd_s64", + "SIMD_ISA": "SVE2", + "name": 
"svaddp[_u16]_m", "arguments": [ - "int64_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -9731,22 +11575,34 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzd_u64", + "SIMD_ISA": "SVE2", + "name": "svaddp[_u16]_x", "arguments": [ - "uint64_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -9754,22 +11610,34 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzh_f16", + "SIMD_ISA": "SVE2", + "name": "svaddp[_u32]_m", "arguments": [ - "float16_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -9777,46 +11645,69 @@ ], "instructions": [ [ - "FCMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_f16", + "SIMD_ISA": "SVE2", + "name": "svaddp[_u32]_x", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + 
"register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_f32", + "SIMD_ISA": "SVE2", + "name": "svaddp[_u64]_m", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -9824,22 +11715,34 @@ ], "instructions": [ [ - "FCMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_f64", + "SIMD_ISA": "SVE2", + "name": "svaddp[_u64]_x", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -9847,46 +11750,69 @@ ], "instructions": [ [ - "FCMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_p64", + "SIMD_ISA": "SVE2", + "name": "svaddp[_u8]_m", "arguments": [ - "poly64x2_t a" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_p8", + "SIMD_ISA": "SVE2", + "name": "svaddp[_u8]_x", "arguments": [ - "poly8x16_t a" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], 
"return_type": { - "value": "uint8x16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -9894,22 +11820,30 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_s16", + "SIMD_ISA": "SVE", + "name": "svaddv[_f16]", "arguments": [ - "int16x8_t a" + "svbool_t pg", + "svfloat16_t op" ], "return_type": { - "value": "uint16x8_t" + "value": "float16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -9917,22 +11851,26 @@ ], "instructions": [ [ - "CMEQ" + "FADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_s32", + "SIMD_ISA": "SVE", + "name": "svaddv[_f32]", "arguments": [ - "int32x4_t a" + "svbool_t pg", + "svfloat32_t op" ], "return_type": { - "value": "uint32x4_t" + "value": "float32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -9940,22 +11878,26 @@ ], "instructions": [ [ - "CMEQ" + "FADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_s64", + "SIMD_ISA": "SVE", + "name": "svaddv[_f64]", "arguments": [ - "int64x2_t a" + "svbool_t pg", + "svfloat64_t op" ], "return_type": { - "value": "uint64x2_t" + "value": "float64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -9963,22 +11905,26 @@ ], "instructions": [ [ - "CMEQ" + "FADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_s8", + "SIMD_ISA": "SVE", + "name": "svaddv[_s16]", "arguments": [ - "int8x16_t a" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "uint8x16_t" + "value": "int64_t" }, "Arguments_Preparation": { - 
"a": { - "register": "Vn.16B" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -9986,22 +11932,26 @@ ], "instructions": [ [ - "CMEQ" + "SADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_u16", + "SIMD_ISA": "SVE", + "name": "svaddv[_s32]", "arguments": [ - "uint16x8_t a" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint16x8_t" + "value": "int64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -10009,22 +11959,26 @@ ], "instructions": [ [ - "CMEQ" + "SADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_u32", + "SIMD_ISA": "SVE", + "name": "svaddv[_s64]", "arguments": [ - "uint32x4_t a" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "uint32x4_t" + "value": "int64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -10032,22 +11986,26 @@ ], "instructions": [ [ - "CMEQ" + "UADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_u64", + "SIMD_ISA": "SVE", + "name": "svaddv[_s8]", "arguments": [ - "uint64x2_t a" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "uint64x2_t" + "value": "int64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -10055,22 +12013,26 @@ ], "instructions": [ [ - "CMEQ" + "SADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_u8", + "SIMD_ISA": "SVE", + "name": "svaddv[_u16]", "arguments": [ - "uint8x16_t a" + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "uint8x16_t" + "value": "uint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -10078,22 +12040,26 @@ ], "instructions": [ [ - "CMEQ" + 
"UADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzs_f32", + "SIMD_ISA": "SVE", + "name": "svaddv[_u32]", "arguments": [ - "float32_t a" + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "uint32_t" + "value": "uint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -10101,83 +12067,80 @@ ], "instructions": [ [ - "FCMEQ" + "UADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_f16", + "SIMD_ISA": "SVE", + "name": "svaddv[_u64]", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "uint16x4_t" + "value": "uint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op": { + "register": "Zop.D" }, - "b": { - "register": "Vm.4H" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "UADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_f32", + "SIMD_ISA": "SVE", + "name": "svaddv[_u8]", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "uint32x2_t" + "value": "uint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op": { + "register": "Zop.B" }, - "b": { - "register": "Vm.2S" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "UADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_f64", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_s16]", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svint16_t op1", + "int8_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ -10185,84 +12148,80 @@ ], "instructions": [ [ - "FCMGE" + "SADDWB" ] ] }, { - "SIMD_ISA": "Neon", - 
"name": "vcge_s16", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_s32]", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svint32_t op1", + "int16_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "SADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_s32", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_s64]", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svint64_t op1", + "int32_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "SADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_s64", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_u16]", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "svuint16_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ -10270,113 +12229,107 @@ ], "instructions": [ [ - "CMGE" + "UADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_s8", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_u32]", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svuint32_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + 
"UADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_u16", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_u64]", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svuint64_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "UADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_u32", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_s16]", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svint16_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "SADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_u64", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_s32]", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "svint32_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -10384,55 +12337,53 @@ ], "instructions": [ [ - "CMHS" + "SADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_u8", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_s64]", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svint64_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" 
], "instructions": [ [ - "CMHS" + "SADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcged_f64", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_u16]", "arguments": [ - "float64_t a", - "float64_t b" + "svuint16_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -10440,26 +12391,26 @@ ], "instructions": [ [ - "FCMGE" + "UADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcged_s64", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_u32]", "arguments": [ - "int64_t a", - "int64_t b" + "svuint32_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -10467,26 +12418,26 @@ ], "instructions": [ [ - "CMGE" + "UADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcged_u64", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_u64]", "arguments": [ - "uint64_t a", - "uint64_t b" + "svuint64_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -10494,26 +12445,26 @@ ], "instructions": [ [ - "CMHS" + "UADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeh_f16", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_s16]", "arguments": [ - "float16_t a", - "float16_t b" + "svint16_t op1", + "int8_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ 
-10521,83 +12472,80 @@ ], "instructions": [ [ - "FCMGE" + "SADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_f16", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_s32]", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svint32_t op1", + "int16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "SADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_f32", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_s64]", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svint64_t op1", + "int32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "SADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_f64", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_u16]", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svuint16_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ -10605,84 +12553,80 @@ ], "instructions": [ [ - "FCMGE" + "UADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_s16", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_u32]", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svuint32_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8H" 
+ "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "UADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_s32", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_u64]", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svuint64_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "UADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_s64", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_s16]", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svint16_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -10690,113 +12634,107 @@ ], "instructions": [ [ - "CMGE" + "SADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_s8", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_s32]", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svint32_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "SADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_u16", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_s64]", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svint64_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + 
"register": "Zop1.D" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "SADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_u32", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_u16]", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svuint16_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "UADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_u64", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_u32]", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svuint32_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -10804,55 +12742,53 @@ ], "instructions": [ [ - "CMHS" + "UADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_u8", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_u64]", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svuint64_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "UADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcges_f32", + "SIMD_ISA": "SVE", + "name": "svadrb[_u32base]_[s32]offset", "arguments": [ - "float32_t a", - "float32_t b" + "svuint32_t bases", + "svint32_t offsets" ], "return_type": { - "value": "uint32_t" + "value": "svuint32_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Sn" + "bases": { + "register": "Zbases.S" }, - "b": { - "register": "Sm" + "offsets": { + "register": "Zoffsets.S" } }, "Architectures": [ @@ -10860,46 +12796,53 @@ ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgez_f16", + "SIMD_ISA": "SVE", + "name": "svadrb[_u32base]_[u32]offset", "arguments": [ - "float16x4_t a" + "svuint32_t bases", + "svuint32_t offsets" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "bases": { + "register": "Zbases.S" + }, + "offsets": { + "register": "Zoffsets.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgez_f32", + "SIMD_ISA": "SVE", + "name": "svadrb[_u64base]_[s64]offset", "arguments": [ - "float32x2_t a" + "svuint64_t bases", + "svint64_t offsets" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "bases": { + "register": "Zbases.D" + }, + "offsets": { + "register": "Zoffsets.D" } }, "Architectures": [ @@ -10907,22 +12850,26 @@ ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgez_f64", + "SIMD_ISA": "SVE", + "name": "svadrb[_u64base]_[u64]offset", "arguments": [ - "float64x1_t a" + "svuint64_t bases", + "svuint64_t offsets" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "bases": { + "register": "Zbases.D" + }, + "offsets": { + "register": "Zoffsets.D" } }, "Architectures": [ @@ -10930,22 +12877,26 @@ ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgez_s16", + "SIMD_ISA": "SVE", + "name": "svadrd[_u32base]_[s32]index", "arguments": [ - "int16x4_t a" + "svuint32_t bases", + "svint32_t indices" ], "return_type": { - "value": "uint16x4_t" + "value": 
"svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" } }, "Architectures": [ @@ -10953,22 +12904,26 @@ ], "instructions": [ [ - "CMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgez_s32", + "SIMD_ISA": "SVE", + "name": "svadrd[_u32base]_[u32]index", "arguments": [ - "int32x2_t a" + "svuint32_t bases", + "svuint32_t indices" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" } }, "Architectures": [ @@ -10976,22 +12931,26 @@ ], "instructions": [ [ - "CMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgez_s64", + "SIMD_ISA": "SVE", + "name": "svadrd[_u64base]_[s64]index", "arguments": [ - "int64x1_t a" + "svuint64_t bases", + "svint64_t indices" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "bases": { + "register": "Zbases.D" + }, + "indices": { + "register": "Zindices.D" } }, "Architectures": [ @@ -10999,22 +12958,26 @@ ], "instructions": [ [ - "CMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgez_s8", + "SIMD_ISA": "SVE", + "name": "svadrd[_u64base]_[u64]index", "arguments": [ - "int8x8_t a" + "svuint64_t bases", + "svuint64_t indices" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "bases": { + "register": "Zbases.D" + }, + "indices": { + "register": "Zindices.D" } }, "Architectures": [ @@ -11022,22 +12985,26 @@ ], "instructions": [ [ - "CMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezd_f64", + "SIMD_ISA": "SVE", + "name": "svadrh[_u32base]_[s32]index", "arguments": [ - "float64_t a" + "svuint32_t bases", + "svint32_t indices" ], "return_type": { - "value": "uint64_t" + "value": "svuint32_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Dn" + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" } }, "Architectures": [ @@ -11045,22 +13012,26 @@ ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezd_s64", + "SIMD_ISA": "SVE", + "name": "svadrh[_u32base]_[u32]index", "arguments": [ - "int64_t a" + "svuint32_t bases", + "svuint32_t indices" ], "return_type": { - "value": "uint64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" } }, "Architectures": [ @@ -11068,22 +13039,26 @@ ], "instructions": [ [ - "CMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezh_f16", + "SIMD_ISA": "SVE", + "name": "svadrh[_u64base]_[s64]index", "arguments": [ - "float16_t a" + "svuint64_t bases", + "svint64_t indices" ], "return_type": { - "value": "uint16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "bases": { + "register": "Zbases.D" + }, + "indices": { + "register": "Zindices.D" } }, "Architectures": [ @@ -11091,46 +13066,53 @@ ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezq_f16", + "SIMD_ISA": "SVE", + "name": "svadrh[_u64base]_[u64]index", "arguments": [ - "float16x8_t a" + "svuint64_t bases", + "svuint64_t indices" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "bases": { + "register": "Zbases.D" + }, + "indices": { + "register": "Zindices.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezq_f32", + "SIMD_ISA": "SVE", + "name": "svadrw[_u32base]_[s32]index", "arguments": [ - "float32x4_t a" + "svuint32_t bases", + "svint32_t indices" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - 
"a": { - "register": "Vn.4S" + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" } }, "Architectures": [ @@ -11138,22 +13120,26 @@ ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezq_f64", + "SIMD_ISA": "SVE", + "name": "svadrw[_u32base]_[u32]index", "arguments": [ - "float64x2_t a" + "svuint32_t bases", + "svuint32_t indices" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" } }, "Architectures": [ @@ -11161,22 +13147,26 @@ ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezq_s16", + "SIMD_ISA": "SVE", + "name": "svadrw[_u64base]_[s64]index", "arguments": [ - "int16x8_t a" + "svuint64_t bases", + "svint64_t indices" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "bases": { + "register": "Zbases.D" + }, + "indices": { + "register": "Zindices.D" } }, "Architectures": [ @@ -11184,22 +13174,26 @@ ], "instructions": [ [ - "CMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezq_s32", + "SIMD_ISA": "SVE", + "name": "svadrw[_u64base]_[u64]index", "arguments": [ - "int32x4_t a" + "svuint64_t bases", + "svuint64_t indices" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "bases": { + "register": "Zbases.D" + }, + "indices": { + "register": "Zindices.D" } }, "Architectures": [ @@ -11207,22 +13201,26 @@ ], "instructions": [ [ - "CMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezq_s64", + "SIMD_ISA": "SVE2", + "name": "svaesd[_u8]", "arguments": [ - "int64x2_t a" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + 
"op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" } }, "Architectures": [ @@ -11230,22 +13228,29 @@ ], "instructions": [ [ - "CMGE" + "AESD" + ], + [ + "AESD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezq_s8", + "SIMD_ISA": "SVE2", + "name": "svaese[_u8]", "arguments": [ - "int8x16_t a" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" } }, "Architectures": [ @@ -11253,22 +13258,25 @@ ], "instructions": [ [ - "CMGE" + "AESE" + ], + [ + "AESE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezs_f32", + "SIMD_ISA": "SVE2", + "name": "svaesimc[_u8]", "arguments": [ - "float32_t a" + "svuint8_t op" ], "return_type": { - "value": "uint32_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op": { + "register": "Ztied.B" } }, "Architectures": [ @@ -11276,83 +13284,84 @@ ], "instructions": [ [ - "FCMGE" + "AESIMC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_f16", + "SIMD_ISA": "SVE2", + "name": "svaesmc[_u8]", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svuint8_t op" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "op": { + "register": "Ztied.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "AESMC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_f32", + "SIMD_ISA": "SVE", + "name": "svand[_b]_z", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Pop1.B" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Pop2.B" + }, + "pg": { + 
"register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_f64", + "SIMD_ISA": "SVE", + "name": "svand[_n_s16]_m", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Ztied1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -11360,84 +13369,113 @@ ], "instructions": [ [ - "FCMGT" + "UXTB" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_s16", + "SIMD_ISA": "SVE", + "name": "svand[_n_s16]_x", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_s32", + "SIMD_ISA": "SVE", + "name": "svand[_n_s16]_z", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": 
"vcgt_s64", + "SIMD_ISA": "SVE", + "name": "svand[_n_s32]_m", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Ztied1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -11445,113 +13483,160 @@ ], "instructions": [ [ - "CMGT" + "UXTB" + ], + [ + "UXTH" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_s8", + "SIMD_ISA": "SVE", + "name": "svand[_n_s32]_x", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_u16", + "SIMD_ISA": "SVE", + "name": "svand[_n_s32]_z", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "UXTH" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_u32", + "SIMD_ISA": "SVE", + "name": "svand[_n_s64]_m", "arguments": [ - "uint32x2_t a", - "uint32x2_t 
b" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Ztied1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "UXTB" + ], + [ + "UXTH" + ], + [ + "UXTW" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_u64", + "SIMD_ISA": "SVE", + "name": "svand[_n_s64]_x", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -11559,55 +13644,87 @@ ], "instructions": [ [ - "CMHI" + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_u8", + "SIMD_ISA": "SVE", + "name": "svand[_n_s64]_z", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "UXTH" + ], + [ + "MOVPRFX", + "UXTW" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtd_f64", + "SIMD_ISA": "SVE", + "name": "svand[_n_s8]_m", "arguments": [ - "float64_t a", - "float64_t b" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], 
"return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -11615,26 +13732,34 @@ ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtd_s64", + "SIMD_ISA": "SVE", + "name": "svand[_n_s8]_x", "arguments": [ - "int64_t a", - "int64_t b" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -11642,26 +13767,39 @@ ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtd_u64", + "SIMD_ISA": "SVE", + "name": "svand[_n_s8]_z", "arguments": [ - "uint64_t a", - "uint64_t b" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -11669,26 +13807,35 @@ ], "instructions": [ [ - "CMHI" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgth_f16", + "SIMD_ISA": "SVE", + "name": "svand[_n_u16]_m", "arguments": [ - "float16_t a", - "float16_t b" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Ztied1.H" }, - "b": { - "register": "Hm" + 
"op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -11696,83 +13843,113 @@ ], "instructions": [ [ - "FCMGT" + "UXTB" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_f16", + "SIMD_ISA": "SVE", + "name": "svand[_n_u16]_x", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_f32", + "SIMD_ISA": "SVE", + "name": "svand[_n_u16]_z", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_f64", + "SIMD_ISA": "SVE", + "name": "svand[_n_u32]_m", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Ztied1.S" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -11780,84 +13957,120 @@ ], 
"instructions": [ [ - "FCMGT" + "UXTB" + ], + [ + "UXTH" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_s16", + "SIMD_ISA": "SVE", + "name": "svand[_n_u32]_x", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_s32", + "SIMD_ISA": "SVE", + "name": "svand[_n_u32]_z", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "UXTH" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_s64", + "SIMD_ISA": "SVE", + "name": "svand[_n_u64]_m", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Ztied1.D" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -11865,113 +14078,162 @@ ], "instructions": [ [ - "CMGT" + "UXTB" + ], + [ + "UXTH" + ], + [ + "UXTW" + ], + [ + 
"AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_s8", + "SIMD_ISA": "SVE", + "name": "svand[_n_u64]_x", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_u16", + "SIMD_ISA": "SVE", + "name": "svand[_n_u64]_z", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "UXTH" + ], + [ + "MOVPRFX", + "UXTW" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_u32", + "SIMD_ISA": "SVE", + "name": "svand[_n_u8]_m", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_u64", + 
"SIMD_ISA": "SVE", + "name": "svand[_n_u8]_x", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -11979,55 +14241,75 @@ ], "instructions": [ [ - "CMHI" + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_u8", + "SIMD_ISA": "SVE", + "name": "svand[_n_u8]_z", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgts_f32", + "SIMD_ISA": "SVE", + "name": "svand[_s16]_m", "arguments": [ - "float32_t a", - "float32_t b" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Sm" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12035,46 +14317,71 @@ ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtz_f16", + "SIMD_ISA": "SVE", + "name": "svand[_s16]_x", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": 
"uint16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtz_f32", + "SIMD_ISA": "SVE", + "name": "svand[_s16]_z", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12082,22 +14389,35 @@ ], "instructions": [ [ - "FCMGT" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtz_f64", + "SIMD_ISA": "SVE", + "name": "svand[_s32]_m", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -12105,22 +14425,34 @@ ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtz_s16", + "SIMD_ISA": "SVE", + "name": "svand[_s32]_x", "arguments": [ - "int16x4_t a" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -12128,22 +14460,36 @@ ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "AND" 
+ ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtz_s32", + "SIMD_ISA": "SVE", + "name": "svand[_s32]_z", "arguments": [ - "int32x2_t a" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -12151,22 +14497,35 @@ ], "instructions": [ [ - "CMGT" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtz_s64", + "SIMD_ISA": "SVE", + "name": "svand[_s64]_m", "arguments": [ - "int64x1_t a" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -12174,22 +14533,34 @@ ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtz_s8", + "SIMD_ISA": "SVE", + "name": "svand[_s64]_x", "arguments": [ - "int8x8_t a" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -12197,22 +14568,36 @@ ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzd_f64", + "SIMD_ISA": "SVE", + "name": "svand[_s64]_z", "arguments": [ - "float64_t a" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + 
"op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -12220,22 +14605,35 @@ ], "instructions": [ [ - "FCMGT" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzd_s64", + "SIMD_ISA": "SVE", + "name": "svand[_s8]_m", "arguments": [ - "int64_t a" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -12243,22 +14641,34 @@ ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzh_f16", + "SIMD_ISA": "SVE", + "name": "svand[_s8]_x", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -12266,46 +14676,72 @@ ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzq_f16", + "SIMD_ISA": "SVE", + "name": "svand[_s8]_z", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzq_f32", + "SIMD_ISA": "SVE", + "name": "svand[_u16]_m", 
"arguments": [ - "float32x4_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12313,22 +14749,34 @@ ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzq_f64", + "SIMD_ISA": "SVE", + "name": "svand[_u16]_x", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12336,22 +14784,36 @@ ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzq_s16", + "SIMD_ISA": "SVE", + "name": "svand[_u16]_z", "arguments": [ - "int16x8_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12359,22 +14821,35 @@ ], "instructions": [ [ - "CMGT" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzq_s32", + "SIMD_ISA": "SVE", + "name": "svand[_u32]_m", "arguments": [ - "int32x4_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + 
"pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -12382,22 +14857,34 @@ ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzq_s64", + "SIMD_ISA": "SVE", + "name": "svand[_u32]_x", "arguments": [ - "int64x2_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -12405,22 +14892,36 @@ ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzq_s8", + "SIMD_ISA": "SVE", + "name": "svand[_u32]_z", "arguments": [ - "int8x16_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -12428,22 +14929,35 @@ ], "instructions": [ [ - "CMGT" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzs_f32", + "SIMD_ISA": "SVE", + "name": "svand[_u64]_m", "arguments": [ - "float32_t a" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -12451,83 +14965,107 @@ ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_f16", + "SIMD_ISA": "SVE", + "name": "svand[_u64]_x", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svbool_t 
pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_f32", + "SIMD_ISA": "SVE", + "name": "svand[_u64]_z", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMGE" - ] - ] + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_f64", + "SIMD_ISA": "SVE", + "name": "svand[_u8]_m", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -12535,84 +15073,103 @@ ], "instructions": [ [ - "FCMGE" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_s16", + "SIMD_ISA": "SVE", + "name": "svand[_u8]_x", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { 
+ "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_s32", + "SIMD_ISA": "SVE", + "name": "svand[_u8]_z", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_s64", + "SIMD_ISA": "SVE", + "name": "svandv[_s16]", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "uint64x1_t" + "value": "int16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.H" }, - "b": { - "register": "Dm" + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12620,113 +15177,107 @@ ], "instructions": [ [ - "CMGE" + "ANDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_s8", + "SIMD_ISA": "SVE", + "name": "svandv[_s32]", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint8x8_t" + "value": "int32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.S" }, - "b": { - "register": "Vm.8B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "ANDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_u16", + "SIMD_ISA": "SVE", + "name": "svandv[_s64]", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + 
"svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "uint16x4_t" + "value": "int64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op": { + "register": "Zop.D" }, - "b": { - "register": "Vm.4H" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "ANDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_u32", + "SIMD_ISA": "SVE", + "name": "svandv[_s8]", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "uint32x2_t" + "value": "int8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op": { + "register": "Zop.B" }, - "b": { - "register": "Vm.2S" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "ANDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_u64", + "SIMD_ISA": "SVE", + "name": "svandv[_u16]", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "uint64x1_t" + "value": "uint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.H" }, - "b": { - "register": "Dm" + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12734,55 +15285,53 @@ ], "instructions": [ [ - "CMHS" + "ANDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_u8", + "SIMD_ISA": "SVE", + "name": "svandv[_u32]", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "uint8x8_t" + "value": "uint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.S" }, - "b": { - "register": "Vm.8B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "ANDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcled_f64", + "SIMD_ISA": "SVE", + "name": "svandv[_u64]", "arguments": [ - "float64_t a", - "float64_t b" + "svbool_t pg", + 
"svuint64_t op" ], "return_type": { "value": "uint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.D" }, - "b": { - "register": "Dm" + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -12790,26 +15339,26 @@ ], "instructions": [ [ - "FCMGE" + "ANDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcled_s64", + "SIMD_ISA": "SVE", + "name": "svandv[_u8]", "arguments": [ - "int64_t a", - "int64_t b" + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "uint64_t" + "value": "uint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.B" }, - "b": { - "register": "Dm" + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -12817,26 +15366,30 @@ ], "instructions": [ [ - "CMGE" + "ANDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcled_u64", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s16]_m", "arguments": [ - "uint64_t a", - "uint64_t b" + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Ztied1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12844,26 +15397,33 @@ ], "instructions": [ [ - "CMHS" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleh_f16", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s16]_x", "arguments": [ - "float16_t a", - "float16_t b" + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12871,83 +15431,114 @@ ], "instructions": [ [ - "FCMGE" + "ASR" + ], + [ + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + 
"ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_f16", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s16]_z", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_f32", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s32]_m", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Ztied1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_f64", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s32]_x", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -12955,84 +15546,114 @@ ], "instructions": [ [ - "FCMGE" + "ASR" + ], + [ + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_s16", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s32]_z", "arguments": [ - 
"int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_s32", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s64]_m", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Ztied1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_s64", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s64]_x", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -13040,113 +15661,155 @@ ], "instructions": [ [ - "CMGE" + "ASR" + ], + [ + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_s8", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s64]_z", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint8x16_t" + 
"value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_u16", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s8]_m", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Ztied1.B" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_u32", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s8]_x", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "ASR" + ], + [ + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_u64", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s8]_z", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": 
"Vm.2D" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -13154,55 +15817,74 @@ ], "instructions": [ [ - "CMHS" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_u8", + "SIMD_ISA": "SVE", + "name": "svasr[_s16]_m", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcles_f32", + "SIMD_ISA": "SVE", + "name": "svasr[_s16]_x", "arguments": [ - "float32_t a", - "float32_t b" + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Sm" + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -13210,46 +15892,73 @@ ], "instructions": [ [ - "FCMGE" + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclez_f16", + "SIMD_ISA": "SVE", + "name": "svasr[_s16]_z", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLE" + "MOVPRFX", + "ASR" + ], + [ + 
"MOVPRFX", + "ASRR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclez_f32", + "SIMD_ISA": "SVE", + "name": "svasr[_s32]_m", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -13257,22 +15966,34 @@ ], "instructions": [ [ - "CMLE" + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclez_f64", + "SIMD_ISA": "SVE", + "name": "svasr[_s32]_x", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -13280,22 +16001,37 @@ ], "instructions": [ [ - "FCMLE" + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclez_s16", + "SIMD_ISA": "SVE", + "name": "svasr[_s32]_z", "arguments": [ - "int16x4_t a" + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -13303,22 +16039,35 @@ ], "instructions": [ [ - "CMLE" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclez_s32", + "SIMD_ISA": "SVE", + "name": "svasr[_s64]_m", "arguments": [ - "int32x2_t a" + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": 
{ - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -13326,22 +16075,34 @@ ], "instructions": [ [ - "CMLE" + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclez_s64", + "SIMD_ISA": "SVE", + "name": "svasr[_s64]_x", "arguments": [ - "int64x1_t a" + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -13349,22 +16110,37 @@ ], "instructions": [ [ - "CMLE" + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclez_s8", + "SIMD_ISA": "SVE", + "name": "svasr[_s64]_z", "arguments": [ - "int8x8_t a" + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -13372,22 +16148,35 @@ ], "instructions": [ [ - "CMLE" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezd_f64", + "SIMD_ISA": "SVE", + "name": "svasr[_s8]_m", "arguments": [ - "float64_t a" + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -13395,22 +16184,34 @@ ], "instructions": [ [ - "FCMLE" + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": 
"vclezd_s64", + "SIMD_ISA": "SVE", + "name": "svasr[_s8]_x", "arguments": [ - "int64_t a" + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -13418,22 +16219,37 @@ ], "instructions": [ [ - "CMLE" + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezh_f16", + "SIMD_ISA": "SVE", + "name": "svasr[_s8]_z", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -13441,46 +16257,69 @@ ], "instructions": [ [ - "FCMLE" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezq_f16", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s16]_m", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLE" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezq_f32", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s16]_x", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { 
+ "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -13488,22 +16327,36 @@ ], "instructions": [ [ - "FCMLE" + "ASR" + ], + [ + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezq_f64", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s16]_z", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -13511,22 +16364,35 @@ ], "instructions": [ [ - "FCMLE" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezq_s16", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s32]_m", "arguments": [ - "int16x8_t a" + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -13534,22 +16400,33 @@ ], "instructions": [ [ - "CMLE" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezq_s32", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s32]_x", "arguments": [ - "int32x4_t a" + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -13557,22 +16434,36 @@ ], "instructions": [ [ - "CMLE" + "ASR" + ], + [ + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezq_s64", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s32]_z", "arguments": [ - 
"int64x2_t a" + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -13580,22 +16471,35 @@ ], "instructions": [ [ - "CMLE" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezq_s8", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s8]_m", "arguments": [ - "int8x16_t a" + "svbool_t pg", + "svint8_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -13603,22 +16507,33 @@ ], "instructions": [ [ - "CMLE" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezs_f32", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s8]_x", "arguments": [ - "float32_t a" + "svbool_t pg", + "svint8_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -13626,383 +16541,517 @@ ], "instructions": [ [ - "FCMLE" + "ASR" + ], + [ + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcls_s16", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s8]_z", "arguments": [ - "int16x4_t a" + "svbool_t pg", + "svint8_t op1", + "uint64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - 
"v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcls_s32", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s16]_m", "arguments": [ - "int32x2_t a" + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcls_s8", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s16]_x", "arguments": [ - "int8x8_t a" + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcls_u16", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s16]_z", "arguments": [ - "uint16x4_t a" + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcls_u32", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s32]_m", "arguments": [ - "uint32x2_t a" + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svint32_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcls_u8", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s32]_x", "arguments": [ - "uint8x8_t a" + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclsq_s16", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s32]_z", "arguments": [ - "int16x8_t a" + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclsq_s32", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s8]_m", "arguments": [ - "int32x4_t a" + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclsq_s8", + "SIMD_ISA": "SVE", + "name": 
"svasr_wide[_s8]_x", "arguments": [ - "int8x16_t a" + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclsq_u16", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s8]_z", "arguments": [ - "uint16x8_t a" + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclsq_u32", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s16]_m", "arguments": [ - "uint32x4_t a" + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" ], "return_type": { - "value": "int32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclsq_u8", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s16]_x", "arguments": [ - "uint8x16_t a" + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" ], "return_type": { - "value": "int8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" } }, 
"Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_f16", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s16]_z", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "imm2": { + "minimum": 1, + "maximum": 16 }, - "b": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_f32", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s32]_m", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "imm2": { + "minimum": 1, + "maximum": 32 }, - "b": { - "register": "Vm.2S" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_f64", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s32]_x", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "imm2": { + "minimum": 1, + "maximum": 32 }, - "b": { - "register": "Dm" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -14010,84 +17059,104 @@ ], "instructions": [ [ - "FCMGT" + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_s16", + "SIMD_ISA": "SVE", + 
"name": "svasrd[_n_s32]_z", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "imm2": { + "minimum": 1, + "maximum": 32 }, - "b": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_s32", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s64]_m", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "imm2": { + "minimum": 1, + "maximum": 64 }, - "b": { - "register": "Vm.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_s64", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s64]_x", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "imm2": { + "minimum": 1, + "maximum": 64 }, - "b": { - "register": "Dm" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -14095,113 +17164,140 @@ ], "instructions": [ [ - "CMGT" + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_s8", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s64]_z", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint64_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "imm2": { + "minimum": 1, + "maximum": 64 }, - "b": { - "register": "Vm.8B" + "op1": { + "register": "Zop1.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_u16", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s8]_m", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "imm2": { + "minimum": 1, + "maximum": 8 }, - "b": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_u32", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s8]_x", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "imm2": { + "minimum": 1, + "maximum": 8 }, - "b": { - "register": "Vm.2S" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_u64", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s8]_z", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "imm2": { + "minimum": 1, + "maximum": 8 }, - "b": { - "register": "Dm" + "op1": { + "register": "Zop1.B" + }, + "pg": { + "register": "Pg.B" } 
}, "Architectures": [ @@ -14209,55 +17305,66 @@ ], "instructions": [ [ - "CMHI" + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_u8", + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_s16]", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svint16_t op1", + "svint16_t op2", + "int16_t op3" ], "return_type": { - "value": "uint8x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltd_f64", + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_s32]", "arguments": [ - "float64_t a", - "float64_t b" + "svint32_t op1", + "svint32_t op2", + "int32_t op3" ], "return_type": { - "value": "uint64_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ @@ -14265,26 +17372,34 @@ ], "instructions": [ [ - "FCMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltd_s64", + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_s64]", "arguments": [ - "int64_t a", - "int64_t b" + "svint64_t op1", + "svint64_t op2", + "int64_t op3" ], "return_type": { - "value": "uint64_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ @@ -14292,26 +17407,34 @@ ], "instructions": [ [ - "CMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltd_u64", + "SIMD_ISA": "SVE2", + "name": 
"svbcax[_n_s8]", "arguments": [ - "uint64_t a", - "uint64_t b" + "svint8_t op1", + "svint8_t op2", + "int8_t op3" ], "return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ @@ -14319,26 +17442,34 @@ ], "instructions": [ [ - "CMHI" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclth_f16", + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_u16]", "arguments": [ - "float16_t a", - "float16_t b" + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" ], "return_type": { - "value": "uint16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ @@ -14346,83 +17477,104 @@ ], "instructions": [ [ - "FCMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_f16", + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_u32]", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_f32", + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_u64]", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" ], "return_type": { - "value": "uint32x4_t" + "value": 
"svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_f64", + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_u8]", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ @@ -14430,84 +17582,104 @@ ], "instructions": [ [ - "FCMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_s16", + "SIMD_ISA": "SVE2", + "name": "svbcax[_s16]", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_s32", + "SIMD_ISA": "SVE2", + "name": "svbcax[_s32]", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" ], "return_type": { - "value": "uint32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": 
"Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_s64", + "SIMD_ISA": "SVE2", + "name": "svbcax[_s64]", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" ], "return_type": { - "value": "uint64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" } }, "Architectures": [ @@ -14515,113 +17687,139 @@ ], "instructions": [ [ - "CMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_s8", + "SIMD_ISA": "SVE2", + "name": "svbcax[_s8]", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" ], "return_type": { - "value": "uint8x16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_u16", + "SIMD_ISA": "SVE2", + "name": "svbcax[_u16]", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - 
"SIMD_ISA": "Neon", - "name": "vcltq_u32", + "SIMD_ISA": "SVE2", + "name": "svbcax[_u32]", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_u64", + "SIMD_ISA": "SVE2", + "name": "svbcax[_u64]", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" } }, "Architectures": [ @@ -14629,55 +17827,65 @@ ], "instructions": [ [ - "CMHI" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_u8", + "SIMD_ISA": "SVE2", + "name": "svbcax[_u8]", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclts_f32", + "SIMD_ISA": "SVE2", + "name": "svbdep[_n_u16]", "arguments": [ - "float32_t a", - "float32_t b" + "svuint16_t op1", + "uint16_t 
op2" ], "return_type": { - "value": "uint32_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Sm" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -14685,46 +17893,53 @@ ], "instructions": [ [ - "FCMGT" + "BDEP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltz_f16", + "SIMD_ISA": "SVE2", + "name": "svbdep[_n_u32]", "arguments": [ - "float16x4_t a" + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLT" + "BDEP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltz_f32", + "SIMD_ISA": "SVE2", + "name": "svbdep[_n_u64]", "arguments": [ - "float32x2_t a" + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" } }, "Architectures": [ @@ -14732,22 +17947,26 @@ ], "instructions": [ [ - "FCMLT" + "BDEP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltz_f64", + "SIMD_ISA": "SVE2", + "name": "svbdep[_n_u8]", "arguments": [ - "float64x1_t a" + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ -14755,22 +17974,26 @@ ], "instructions": [ [ - "FCMLT" + "BDEP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltz_s16", + "SIMD_ISA": "SVE2", + "name": "svbdep[_u16]", "arguments": [ - "int16x4_t a" + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - 
"register": "Vn.4H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -14778,22 +18001,26 @@ ], "instructions": [ [ - "CMLT" + "BDEP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltz_s32", + "SIMD_ISA": "SVE2", + "name": "svbdep[_u32]", "arguments": [ - "int32x2_t a" + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -14801,22 +18028,26 @@ ], "instructions": [ [ - "CMLT" + "BDEP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltz_s64", + "SIMD_ISA": "SVE2", + "name": "svbdep[_u64]", "arguments": [ - "int64x1_t a" + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" } }, "Architectures": [ @@ -14824,22 +18055,26 @@ ], "instructions": [ [ - "CMLT" + "BDEP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltz_s8", + "SIMD_ISA": "SVE2", + "name": "svbdep[_u8]", "arguments": [ - "int8x8_t a" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -14847,22 +18082,26 @@ ], "instructions": [ [ - "CMLT" + "BDEP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzd_f64", + "SIMD_ISA": "SVE2", + "name": "svbext[_n_u16]", "arguments": [ - "float64_t a" + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -14870,22 +18109,26 @@ ], 
"instructions": [ [ - "FCMLT" + "BEXT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzd_s64", + "SIMD_ISA": "SVE2", + "name": "svbext[_n_u32]", "arguments": [ - "int64_t a" + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -14893,22 +18136,26 @@ ], "instructions": [ [ - "CMLT" + "BEXT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzh_f16", + "SIMD_ISA": "SVE2", + "name": "svbext[_n_u64]", "arguments": [ - "float16_t a" + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" } }, "Architectures": [ @@ -14916,46 +18163,53 @@ ], "instructions": [ [ - "FCMLT" + "BEXT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzq_f16", + "SIMD_ISA": "SVE2", + "name": "svbext[_n_u8]", "arguments": [ - "float16x8_t a" + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLT" + "BEXT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzq_f32", + "SIMD_ISA": "SVE2", + "name": "svbext[_u16]", "arguments": [ - "float32x4_t a" + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -14963,22 +18217,26 @@ ], "instructions": [ [ - "FCMLT" + "BEXT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzq_f64", + "SIMD_ISA": "SVE2", + "name": "svbext[_u32]", 
"arguments": [ - "float64x2_t a" + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -14986,22 +18244,26 @@ ], "instructions": [ [ - "FCMLT" + "BEXT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzq_s16", + "SIMD_ISA": "SVE2", + "name": "svbext[_u64]", "arguments": [ - "int16x8_t a" + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" } }, "Architectures": [ @@ -15009,22 +18271,26 @@ ], "instructions": [ [ - "CMLT" + "BEXT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzq_s32", + "SIMD_ISA": "SVE2", + "name": "svbext[_u8]", "arguments": [ - "int32x4_t a" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -15032,22 +18298,26 @@ ], "instructions": [ [ - "CMLT" + "BEXT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzq_s64", + "SIMD_ISA": "SVE2", + "name": "svbgrp[_n_u16]", "arguments": [ - "int64x2_t a" + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -15055,22 +18325,26 @@ ], "instructions": [ [ - "CMLT" + "BGRP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzq_s8", + "SIMD_ISA": "SVE2", + "name": "svbgrp[_n_u32]", "arguments": [ - "int8x16_t a" + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": 
"svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -15078,22 +18352,26 @@ ], "instructions": [ [ - "CMLT" + "BGRP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzs_f32", + "SIMD_ISA": "SVE2", + "name": "svbgrp[_n_u64]", "arguments": [ - "float32_t a" + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" } }, "Architectures": [ @@ -15101,1138 +18379,1299 @@ ], "instructions": [ [ - "FCMLT" + "BGRP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclz_s16", + "SIMD_ISA": "SVE2", + "name": "svbgrp[_n_u8]", "arguments": [ - "int16x4_t a" + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "BGRP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclz_s32", + "SIMD_ISA": "SVE2", + "name": "svbgrp[_u16]", "arguments": [ - "int32x2_t a" + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "BGRP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclz_s8", + "SIMD_ISA": "SVE2", + "name": "svbgrp[_u32]", "arguments": [ - "int8x8_t a" + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" } 
}, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "BGRP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclz_u16", + "SIMD_ISA": "SVE2", + "name": "svbgrp[_u64]", "arguments": [ - "uint16x4_t a" + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "BGRP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclz_u32", + "SIMD_ISA": "SVE2", + "name": "svbgrp[_u8]", "arguments": [ - "uint32x2_t a" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "BGRP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclz_u8", + "SIMD_ISA": "SVE", + "name": "svbic[_b]_z", "arguments": [ - "uint8x8_t a" + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclzq_s16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s16]_m", "arguments": [ - "int16x8_t a" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], 
"instructions": [ [ - "CLZ" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclzq_s32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s16]_x", "arguments": [ - "int32x4_t a" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "AND" + ], + [ + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclzq_s8", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s16]_z", "arguments": [ - "int8x16_t a" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclzq_u16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s32]_m", "arguments": [ - "uint16x8_t a" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclzq_u32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s32]_x", "arguments": [ - "uint32x4_t a" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - 
"register": "Vn.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "AND" + ], + [ + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclzq_u8", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s32]_z", "arguments": [ - "uint8x16_t a" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s64]_m", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s64]_x", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "AND" + ], + [ + "BIC" + ], + 
[ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_lane_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s64]_z", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x4_t b", - "const int lane" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.D" }, - "r": { - "register": "Vd.4H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_lane_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s8]_m", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x2_t b", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "r": { - "register": "Vd.2S" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s8]_x", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x8_t b", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "r": { - "register": "Vd.4H" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "AND" + ], + [ + "BIC" + ], + [ + 
"BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s8]_z", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x4_t b", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B" }, - "r": { - "register": "Vd.2S" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP", - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot180_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u16]_m", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot180_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u16]_x", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "AND" + ], + [ + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot180_lane_f16", + "SIMD_ISA": "SVE", + "name": 
"svbic[_n_u16]_z", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x4_t b", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.H" }, - "r": { - "register": "Vd.4H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot180_lane_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u32]_m", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x2_t b", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "r": { - "register": "Vd.2S" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot180_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u32]_x", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x8_t b", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "r": { - "register": "Vd.4H" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "AND" + ], + [ + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": 
"vcmla_rot180_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u32]_z", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x4_t b", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "r": { - "register": "Vd.2S" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP", - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot270_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u64]_m", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot270_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u64]_x", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "AND" + ], + [ + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot270_lane_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u64]_z", "arguments": 
[ - "float16x4_t r", - "float16x4_t a", - "float16x4_t b", - "const int lane" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.D" }, - "r": { - "register": "Vd.4H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot270_lane_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u8]_m", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x2_t b", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "r": { - "register": "Vd.2S" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot270_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u8]_x", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x8_t b", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "r": { - "register": "Vd.4H" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "AND" + ], + [ + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot270_laneq_f32", + "SIMD_ISA": "SVE", + "name": 
"svbic[_n_u8]_z", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x4_t b", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B" }, - "r": { - "register": "Vd.2S" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP", - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot90_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_s16]_m", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot90_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_s16]_x", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot90_lane_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_s16]_z", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x4_t b", - "const int lane" + "svbool_t pg", 
+ "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.H" }, - "r": { - "register": "Vd.4H" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot90_lane_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_s32]_m", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x2_t b", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "r": { - "register": "Vd.2S" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot90_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_s32]_x", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x8_t b", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "r": { - "register": "Vd.4H" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot90_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_s32]_z", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x4_t b", - "const int lane" + "svbool_t pg", + 
"svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "r": { - "register": "Vd.2S" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP", - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_s64]_m", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_s64]_x", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_f64", + "SIMD_ISA": "SVE", + "name": "svbic[_s64]_z", "arguments": [ - "float64x2_t r", - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - 
"register": "Vd.2D" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -16240,214 +19679,233 @@ ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_lane_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_s8]_m", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x4_t b", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "r": { - "register": "Vd.8H" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_lane_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_s8]_x", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x2_t b", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "r": { - "register": "Vd.4S" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_s8]_z", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x8_t b", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": 
"Zop1.B" }, - "r": { - "register": "Vd.8H" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_u16]_m", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x4_t b", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "r": { - "register": "Vd.4S" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot180_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_u16]_x", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot180_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_u16]_z", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, 
"Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot180_f64", + "SIMD_ISA": "SVE", + "name": "svbic[_u32]_m", "arguments": [ - "float64x2_t r", - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.2D" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -16455,214 +19913,236 @@ ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot180_lane_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_u32]_x", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x4_t b", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "r": { - "register": "Vd.8H" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot180_lane_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_u32]_z", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x2_t b", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.S" }, - "r": { - "register": "Vd.4S" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], 
"instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot180_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_u64]_m", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x8_t b", - "const int lane" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "r": { - "register": "Vd.8H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot180_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_u64]_x", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x4_t b", - "const int lane" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "r": { - "register": "Vd.4S" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot270_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_u64]_z", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] 
] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot270_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_u8]_m", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot270_f64", + "SIMD_ISA": "SVE", + "name": "svbic[_u8]_x", "arguments": [ - "float64x2_t r", - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -16670,214 +20150,212 @@ ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot270_lane_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_u8]_z", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x4_t b", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B" }, - "r": { - "register": "Vd.8H" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot270_lane_f32", + "SIMD_ISA": "SVE", + "name": "svbrka[_b]_m", 
"arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x2_t b", - "const int lane" + "svbool_t inactive", + "svbool_t pg", + "svbool_t op" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 0 + "inactive": { + "register": "Ptied.B" }, - "r": { - "register": "Vd.4S" + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BRKA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot270_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svbrka[_b]_z", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x8_t b", - "const int lane" + "svbool_t pg", + "svbool_t op" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 3 + "op": { + "register": "Pop.B" }, - "r": { - "register": "Vd.8H" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BRKA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot270_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svbrkb[_b]_m", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x4_t b", - "const int lane" + "svbool_t inactive", + "svbool_t pg", + "svbool_t op" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "inactive": { + "register": "Ptied.B" }, - "r": { - "register": "Vd.4S" + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BRKB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot90_f16", + "SIMD_ISA": "SVE", + "name": "svbrkb[_b]_z", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svbool_t op" ], "return_type": { - 
"value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.8H" + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BRKB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot90_f32", + "SIMD_ISA": "SVE", + "name": "svbrkn[_b]_z", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.4S" + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Ptied2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BRKN" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot90_f64", + "SIMD_ISA": "SVE", + "name": "svbrkpa[_b]_z", "arguments": [ - "float64x2_t r", - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.2D" + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -16885,368 +20363,446 @@ ], "instructions": [ [ - "FCMLA" + "BRKPA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot90_lane_f16", + "SIMD_ISA": "SVE", + "name": "svbrkpb[_b]_z", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x4_t b", - "const int lane" + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Pop1.B" }, - "r": { - "register": "Vd.8H" + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" } }, 
"Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BRKPB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot90_lane_f32", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_s16]", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x2_t b", - "const int lane" + "svint16_t op1", + "svint16_t op2", + "int16_t op3" ], "return_type": { - "value": "float32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "r": { - "register": "Vd.4S" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot90_laneq_f16", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_s32]", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x8_t b", - "const int lane" + "svint32_t op1", + "svint32_t op2", + "int32_t op3" ], "return_type": { - "value": "float16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "r": { - "register": "Vd.8H" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot90_laneq_f32", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_s64]", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x4_t b", - "const int lane" + "svint64_t op1", + "svint64_t op2", + "int64_t op3" ], "return_type": { - "value": "float32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "r": { - "register": "Vd.4S" + "op2": { + "register": "Zop2.D" 
+ }, + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcnt_p8", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_s8]", "arguments": [ - "poly8x8_t a" + "svint8_t op1", + "svint8_t op2", + "int8_t op3" ], "return_type": { - "value": "poly8x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CNT" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcnt_s8", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_u16]", "arguments": [ - "int8x8_t a" + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" ], "return_type": { - "value": "int8x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CNT" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcnt_u8", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_u32]", "arguments": [ - "uint8x8_t a" + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CNT" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcntq_p8", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_u64]", "arguments": [ - "poly8x16_t a" + "svuint64_t op1", + 
"svuint64_t op2", + "uint64_t op3" ], "return_type": { - "value": "poly8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CNT" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcntq_s8", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_u8]", "arguments": [ - "int8x16_t a" + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" ], "return_type": { - "value": "int8x16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CNT" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcntq_u8", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_s16]", "arguments": [ - "uint8x16_t a" + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" ], "return_type": { - "value": "uint8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CNT" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_f16", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_s32]", "arguments": [ - "float16x4_t low", - "float16x4_t high" + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" ], "return_type": { - "value": "float16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "low": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.S" + }, + 
"op3": { + "register": "Zop3.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_f32", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_s64]", "arguments": [ - "float32x2_t low", - "float32x2_t high" + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" ], "return_type": { - "value": "float32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "low": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_f64", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_s8]", "arguments": [ - "float64x1_t low", - "float64x1_t high" + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" ], "return_type": { - "value": "float64x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.1D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "low": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" } }, "Architectures": [ @@ -17254,366 +20810,419 @@ ], "instructions": [ [ - "DUP", - "INS" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_p16", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_u16]", "arguments": [ - "poly16x4_t low", - "poly16x4_t high" + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" ], "return_type": { - "value": "poly16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "low": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ 
- "DUP", - "INS" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_p64", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_u32]", "arguments": [ - "poly64x1_t low", - "poly64x1_t high" + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" ], "return_type": { - "value": "poly64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.1D" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "low": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_p8", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_u64]", "arguments": [ - "poly8x8_t low", - "poly8x8_t high" + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" ], "return_type": { - "value": "poly8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "low": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_s16", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_u8]", "arguments": [ - "int16x4_t low", - "int16x4_t high" + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" ], "return_type": { - "value": "int16x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "low": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_s32", + 
"SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_s16]", "arguments": [ - "int32x2_t low", - "int32x2_t high" + "svint16_t op1", + "svint16_t op2", + "int16_t op3" ], "return_type": { - "value": "int32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "low": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_s64", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_s32]", "arguments": [ - "int64x1_t low", - "int64x1_t high" + "svint32_t op1", + "svint32_t op2", + "int32_t op3" ], "return_type": { - "value": "int64x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.1D" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "low": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_s8", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_s64]", "arguments": [ - "int8x8_t low", - "int8x8_t high" + "svint64_t op1", + "svint64_t op2", + "int64_t op3" ], "return_type": { - "value": "int8x16_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "low": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_u16", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_s8]", "arguments": [ - "uint16x4_t low", - "uint16x4_t high" + "svint8_t 
op1", + "svint8_t op2", + "int8_t op3" ], "return_type": { - "value": "uint16x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "low": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_u32", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_u16]", "arguments": [ - "uint32x2_t low", - "uint32x2_t high" + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "low": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_u64", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_u32]", "arguments": [ - "uint64x1_t low", - "uint64x1_t high" + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.1D" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "low": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_u8", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_u64]", "arguments": [ - "uint8x8_t low", - "uint8x8_t high" + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" ], "return_type": { - "value": "uint8x16_t" + "value": 
"svuint64_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "low": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_f32", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_u8]", "arguments": [ - "float32x2_t a", - "const int lane1", - "float32x2_t b", - "const int lane2" + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" ], "return_type": { - "value": "float32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ @@ -17621,36 +21230,34 @@ ], "instructions": [ [ - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_f64", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_s16]", "arguments": [ - "float64x1_t a", - "const int lane1", - "float64x1_t b", - "const int lane2" + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" ], "return_type": { - "value": "float64x1_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "UNUSED" - }, - "b": { - "register": "Vn.1D" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "lane1": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 0 + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -17658,36 +21265,34 @@ ], "instructions": [ [ - "DUP" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_p16", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_s32]", "arguments": [ - 
"poly16x4_t a", - "const int lane1", - "poly16x4_t b", - "const int lane2" + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" ], "return_type": { - "value": "poly16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ -17695,74 +21300,69 @@ ], "instructions": [ [ - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_p64", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_s64]", "arguments": [ - "poly64x1_t a", - "const int lane1", - "poly64x1_t b", - "const int lane2" + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" ], "return_type": { - "value": "poly64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "UNUSED" - }, - "b": { - "register": "Vn.1D" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "lane1": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 0 + "op3": { + "register": "Zop3.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_p8", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_s8]", "arguments": [ - "poly8x8_t a", - "const int lane1", - "poly8x8_t b", - "const int lane2" + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" ], "return_type": { - "value": "poly8x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.8B" - }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op3": { + "register": "Zop3.B" } }, "Architectures": 
[ @@ -17770,36 +21370,34 @@ ], "instructions": [ [ - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_s16", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_u16]", "arguments": [ - "int16x4_t a", - "const int lane1", - "int16x4_t b", - "const int lane2" + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" ], "return_type": { - "value": "int16x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.4H" - }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -17807,36 +21405,34 @@ ], "instructions": [ [ - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_s32", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_u32]", "arguments": [ - "int32x2_t a", - "const int lane1", - "int32x2_t b", - "const int lane2" + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" ], "return_type": { - "value": "int32x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.2S" - }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ -17844,36 +21440,34 @@ ], "instructions": [ [ - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_s64", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_u64]", "arguments": [ - "int64x1_t a", - "const int lane1", - "int64x1_t b", - "const int lane2" + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" ], "return_type": { - "value": "int64x1_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "UNUSED" + "op1": { + 
"register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.1D" - }, - "lane1": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 0 + "op3": { + "register": "Zop3.D" } }, "Architectures": [ @@ -17881,36 +21475,34 @@ ], "instructions": [ [ - "DUP" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_s8", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_u8]", "arguments": [ - "int8x8_t a", - "const int lane1", - "int8x8_t b", - "const int lane2" + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" ], "return_type": { - "value": "int8x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.8B" - }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op3": { + "register": "Zop3.B" } }, "Architectures": [ @@ -17918,36 +21510,34 @@ ], "instructions": [ [ - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_u16", + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_s16]", "arguments": [ - "uint16x4_t a", - "const int lane1", - "uint16x4_t b", - "const int lane2" + "svint16_t op1", + "svint16_t op2", + "int16_t op3" ], "return_type": { - "value": "uint16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.4H" - }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ @@ -17955,36 +21545,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_u32", + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_s32]", "arguments": [ - "uint32x2_t a", - "const int lane1", - 
"uint32x2_t b", - "const int lane2" + "svint32_t op1", + "svint32_t op2", + "int32_t op3" ], "return_type": { - "value": "uint32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ @@ -17992,36 +21580,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_u64", + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_s64]", "arguments": [ - "uint64x1_t a", - "const int lane1", - "uint64x1_t b", - "const int lane2" + "svint64_t op1", + "svint64_t op2", + "int64_t op3" ], "return_type": { - "value": "uint64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "UNUSED" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.1D" - }, - "lane1": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 0 + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ @@ -18029,36 +21615,34 @@ ], "instructions": [ [ - "DUP" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_u8", + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_s8]", "arguments": [ - "uint8x8_t a", - "const int lane1", - "uint8x8_t b", - "const int lane2" + "svint8_t op1", + "svint8_t op2", + "int8_t op3" ], "return_type": { - "value": "uint8x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ @@ -18066,36 +21650,34 @@ ], 
"instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_f32", + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_u16]", "arguments": [ - "float32x2_t a", - "const int lane1", - "float32x4_t b", - "const int lane2" + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" ], "return_type": { - "value": "float32x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.4S" - }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ @@ -18103,36 +21685,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_f64", + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_u32]", "arguments": [ - "float64x1_t a", - "const int lane1", - "float64x2_t b", - "const int lane2" + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" ], "return_type": { - "value": "float64x1_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "UNUSED" - }, - "b": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ @@ -18140,36 +21720,34 @@ ], "instructions": [ [ - "DUP" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_p16", + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_u64]", "arguments": [ - "poly16x4_t a", - "const int lane1", - "poly16x8_t b", - "const int lane2" + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" ], "return_type": { - "value": "poly16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" + "op1": { + "register": 
"Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.8H" - }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ @@ -18177,74 +21755,69 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_p64", + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_u8]", "arguments": [ - "poly64x1_t a", - "const int lane1", - "poly64x2_t b", - "const int lane2" + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" ], "return_type": { - "value": "poly64x1_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "UNUSED" - }, - "b": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "lane1": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_p8", + "SIMD_ISA": "SVE2", + "name": "svbsl[_s16]", "arguments": [ - "poly8x8_t a", - "const int lane1", - "poly8x16_t b", - "const int lane2" + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" ], "return_type": { - "value": "poly8x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 15 + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -18252,36 +21825,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_s16", + "SIMD_ISA": "SVE2", + "name": "svbsl[_s32]", "arguments": [ - "int16x4_t a", - "const int lane1", - "int16x8_t b", - 
"const int lane2" + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" ], "return_type": { - "value": "int16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ -18289,36 +21860,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_s32", + "SIMD_ISA": "SVE2", + "name": "svbsl[_s64]", "arguments": [ - "int32x2_t a", - "const int lane1", - "int32x4_t b", - "const int lane2" + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" ], "return_type": { - "value": "int32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op3": { + "register": "Zop3.D" } }, "Architectures": [ @@ -18326,36 +21895,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_s64", + "SIMD_ISA": "SVE2", + "name": "svbsl[_s8]", "arguments": [ - "int64x1_t a", - "const int lane1", - "int64x2_t b", - "const int lane2" + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" ], "return_type": { - "value": "int64x1_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "UNUSED" - }, - "b": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "lane1": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op3": { + "register": "Zop3.B" } }, "Architectures": [ @@ -18363,36 +21930,34 @@ ], "instructions": [ [ - "DUP" 
+ "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_s8", + "SIMD_ISA": "SVE2", + "name": "svbsl[_u16]", "arguments": [ - "int8x8_t a", - "const int lane1", - "int8x16_t b", - "const int lane2" + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" ], "return_type": { - "value": "int8x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 15 + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -18400,36 +21965,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_u16", + "SIMD_ISA": "SVE2", + "name": "svbsl[_u32]", "arguments": [ - "uint16x4_t a", - "const int lane1", - "uint16x8_t b", - "const int lane2" + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ -18437,36 +22000,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_u32", + "SIMD_ISA": "SVE2", + "name": "svbsl[_u64]", "arguments": [ - "uint32x2_t a", - "const int lane1", - "uint32x4_t b", - "const int lane2" + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - 
"lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op3": { + "register": "Zop3.D" } }, "Architectures": [ @@ -18474,36 +22035,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_u64", + "SIMD_ISA": "SVE2", + "name": "svbsl[_u8]", "arguments": [ - "uint64x1_t a", - "const int lane1", - "uint64x2_t b", - "const int lane2" + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "UNUSED" - }, - "b": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "lane1": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op3": { + "register": "Zop3.B" } }, "Architectures": [ @@ -18511,36 +22070,35 @@ ], "instructions": [ [ - "DUP" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_u8", + "SIMD_ISA": "SVE", + "name": "svcadd[_f16]_m", "arguments": [ - "uint8x8_t a", - "const int lane1", - "uint8x16_t b", - "const int lane2" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint8x8_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 15 + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -18548,36 +22106,35 @@ ], "instructions": [ [ - "INS" + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_f32", + "SIMD_ISA": "SVE", + "name": "svcadd[_f16]_x", "arguments": [ - "float32x4_t a", - "const int lane1", - "float32x2_t b", - "const int lane2" + 
"svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "float32x4_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -18585,36 +22142,35 @@ ], "instructions": [ [ - "INS" + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_f64", + "SIMD_ISA": "SVE", + "name": "svcadd[_f16]_z", "arguments": [ - "float64x2_t a", - "const int lane1", - "float64x1_t b", - "const int lane2" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "float64x2_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.1D" + "op1": { + "register": "Zop1.H" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 0 + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -18622,36 +22178,32 @@ ], "instructions": [ [ - "INS" + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_p16", + "SIMD_ISA": "SVE", + "name": "svcadd[_f32]_m", "arguments": [ - "poly16x8_t a", - "const int lane1", - "poly16x4_t b", - "const int lane2" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "poly16x8_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "pg": { + "register": "Pg.S" } }, 
"Architectures": [ @@ -18659,74 +22211,71 @@ ], "instructions": [ [ - "INS" + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_p64", + "SIMD_ISA": "SVE", + "name": "svcadd[_f32]_x", "arguments": [ - "poly64x2_t a", - "const int lane1", - "poly64x1_t b", - "const int lane2" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "poly64x2_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.1D" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 0 + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "INS" + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_p8", + "SIMD_ISA": "SVE", + "name": "svcadd[_f32]_z", "arguments": [ - "poly8x16_t a", - "const int lane1", - "poly8x8_t b", - "const int lane2" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "poly8x16_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S" }, - "lane1": { - "minimum": 0, - "maximum": 15 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -18734,36 +22283,32 @@ ], "instructions": [ [ - "INS" + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_s16", + "SIMD_ISA": "SVE", + "name": "svcadd[_f64]_m", "arguments": [ - "int16x8_t a", - "const int lane1", - "int16x4_t b", - "const int lane2" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "int16x8_t" + "value": "svfloat64_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -18771,36 +22316,35 @@ ], "instructions": [ [ - "INS" + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_s32", + "SIMD_ISA": "SVE", + "name": "svcadd[_f64]_x", "arguments": [ - "int32x4_t a", - "const int lane1", - "int32x2_t b", - "const int lane2" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "int32x4_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.2S" - }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -18808,36 +22352,35 @@ ], "instructions": [ [ - "INS" + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_s64", + "SIMD_ISA": "SVE", + "name": "svcadd[_f64]_z", "arguments": [ - "int64x2_t a", - "const int lane1", - "int64x1_t b", - "const int lane2" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "int64x2_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.1D" + "op1": { + "register": "Zop1.D" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 0 + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -18845,36 +22388,28 @@ ], "instructions": [ [ - "INS" + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_s8", + 
"SIMD_ISA": "SVE2", + "name": "svcadd[_s16]", "arguments": [ - "int8x16_t a", - "const int lane1", - "int8x8_t b", - "const int lane2" + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "int8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.8B" - }, - "lane1": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -18882,36 +22417,31 @@ ], "instructions": [ [ - "INS" + "CADD" + ], + [ + "MOVPRFX", + "CADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_u16", + "SIMD_ISA": "SVE2", + "name": "svcadd[_s32]", "arguments": [ - "uint16x8_t a", - "const int lane1", - "uint16x4_t b", - "const int lane2" + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.4H" - }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -18919,36 +22449,31 @@ ], "instructions": [ [ - "INS" + "CADD" + ], + [ + "MOVPRFX", + "CADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_u32", + "SIMD_ISA": "SVE2", + "name": "svcadd[_s64]", "arguments": [ - "uint32x4_t a", - "const int lane1", - "uint32x2_t b", - "const int lane2" + "svint64_t op1", + "svint64_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.2S" - }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.D" } }, 
"Architectures": [ @@ -18956,36 +22481,31 @@ ], "instructions": [ [ - "INS" + "CADD" + ], + [ + "MOVPRFX", + "CADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_u64", + "SIMD_ISA": "SVE2", + "name": "svcadd[_s8]", "arguments": [ - "uint64x2_t a", - "const int lane1", - "uint64x1_t b", - "const int lane2" + "svint8_t op1", + "svint8_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint64x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.1D" - }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "lane2": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -18993,36 +22513,31 @@ ], "instructions": [ [ - "INS" + "CADD" + ], + [ + "MOVPRFX", + "CADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_u8", + "SIMD_ISA": "SVE2", + "name": "svcadd[_u16]", "arguments": [ - "uint8x16_t a", - "const int lane1", - "uint8x8_t b", - "const int lane2" + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.8B" - }, - "lane1": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -19030,36 +22545,31 @@ ], "instructions": [ [ - "INS" + "CADD" + ], + [ + "MOVPRFX", + "CADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_f32", + "SIMD_ISA": "SVE2", + "name": "svcadd[_u32]", "arguments": [ - "float32x4_t a", - "const int lane1", - "float32x4_t b", - "const int lane2" + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "float32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, 
- "lane1": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -19067,36 +22577,31 @@ ], "instructions": [ [ - "INS" + "CADD" + ], + [ + "MOVPRFX", + "CADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_f64", + "SIMD_ISA": "SVE2", + "name": "svcadd[_u64]", "arguments": [ - "float64x2_t a", - "const int lane1", - "float64x2_t b", - "const int lane2" + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "float64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.2D" - }, - "lane1": { - "minimum": 0, - "maximum": 1 - }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.D" } }, "Architectures": [ @@ -19104,36 +22609,31 @@ ], "instructions": [ [ - "INS" + "CADD" + ], + [ + "MOVPRFX", + "CADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_p16", + "SIMD_ISA": "SVE2", + "name": "svcadd[_u8]", "arguments": [ - "poly16x8_t a", - "const int lane1", - "poly16x8_t b", - "const int lane2" + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "poly16x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "lane1": { - "minimum": 0, - "maximum": 7 - }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -19141,74 +22641,71 @@ ], "instructions": [ [ - "INS" + "CADD" + ], + [ + "MOVPRFX", + "CADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_p64", + "SIMD_ISA": "SVE2", + "name": "svcdot[_s32]", "arguments": [ - "poly64x2_t a", - "const int lane1", - "poly64x2_t b", - "const int lane2" + "svint32_t op1", + "svint8_t op2", + "svint8_t op3", + 
"uint64_t imm_rotation" ], "return_type": { - "value": "poly64x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op3": { + "register": "Zop3.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "INS" + "CDOT" + ], + [ + "MOVPRFX", + "CDOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_p8", + "SIMD_ISA": "SVE2", + "name": "svcdot[_s64]", "arguments": [ - "poly8x16_t a", - "const int lane1", - "poly8x16_t b", - "const int lane2" + "svint64_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "poly8x16_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "lane1": { - "minimum": 0, - "maximum": 15 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 15 + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -19216,36 +22713,40 @@ ], "instructions": [ [ - "INS" + "CDOT" + ], + [ + "MOVPRFX", + "CDOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_s16", + "SIMD_ISA": "SVE2", + "name": "svcdot_lane[_s32]", "arguments": [ - "int16x8_t a", - "const int lane1", - "int16x8_t b", - "const int lane2" + "svint32_t op1", + "svint8_t op2", + "svint8_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" ], "return_type": { - "value": "int16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" + "imm_index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op3": { + "register": 
"Zop3.B" } }, "Architectures": [ @@ -19253,36 +22754,40 @@ ], "instructions": [ [ - "INS" + "CDOT" + ], + [ + "MOVPRFX", + "CDOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_s32", + "SIMD_ISA": "SVE2", + "name": "svcdot_lane[_s64]", "arguments": [ - "int32x4_t a", - "const int lane1", - "int32x4_t b", - "const int lane2" + "svint64_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" ], "return_type": { - "value": "int32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" + "imm_index": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -19290,36 +22795,34 @@ ], "instructions": [ [ - "INS" + "CDOT" + ], + [ + "MOVPRFX", + "CDOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_s64", + "SIMD_ISA": "SVE2", + "name": "svclamp[_f16]", "arguments": [ - "int64x2_t a", - "const int lane1", - "int64x2_t b", - "const int lane2" + "svfloat16_t op", + "svfloat16_t min", + "svfloat16_t max" ], "return_type": { - "value": "int64x2_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.2D" + "max": { + "register": "Zreg3.H" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "min": { + "register": "Zreg2.H" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op": { + "register": "Zreg1.H" } }, "Architectures": [ @@ -19327,36 +22830,30 @@ ], "instructions": [ [ - "INS" + "FCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_s8", + "SIMD_ISA": "SVE2", + "name": "svclamp[_f32]", "arguments": [ - "int8x16_t a", - "const int lane1", - "int8x16_t b", - "const int lane2" + "svfloat32_t op", + "svfloat32_t min", + "svfloat32_t max" ], "return_type": { - "value": "int8x16_t" + 
"value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "max": { + "register": "Zreg3.S" }, - "b": { - "register": "Vn.16B" - }, - "lane1": { - "minimum": 0, - "maximum": 15 + "min": { + "register": "Zreg2.S" }, - "lane2": { - "minimum": 0, - "maximum": 15 + "op": { + "register": "Zreg1.S" } }, "Architectures": [ @@ -19364,36 +22861,30 @@ ], "instructions": [ [ - "INS" + "FCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_u16", + "SIMD_ISA": "SVE2", + "name": "svclamp[_f64]", "arguments": [ - "uint16x8_t a", - "const int lane1", - "uint16x8_t b", - "const int lane2" + "svfloat64_t op", + "svfloat64_t min", + "svfloat64_t max" ], "return_type": { - "value": "uint16x8_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" + "max": { + "register": "Zreg3.D" }, - "lane1": { - "minimum": 0, - "maximum": 7 + "min": { + "register": "Zreg2.D" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op": { + "register": "Zreg1.D" } }, "Architectures": [ @@ -19401,36 +22892,30 @@ ], "instructions": [ [ - "INS" + "FCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_u32", + "SIMD_ISA": "SVE2", + "name": "svclamp[_s16]", "arguments": [ - "uint32x4_t a", - "const int lane1", - "uint32x4_t b", - "const int lane2" + "svint16_t op", + "svint16_t min", + "svint16_t max" ], "return_type": { - "value": "uint32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" + "max": { + "register": "Zreg3.H" }, - "lane1": { - "minimum": 0, - "maximum": 3 + "min": { + "register": "Zreg2.H" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op": { + "register": "Zreg1.H" } }, "Architectures": [ @@ -19438,36 +22923,30 @@ ], "instructions": [ [ - "INS" + "SCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_u64", + "SIMD_ISA": "SVE2", + "name": "svclamp[_s32]", "arguments": [ - "uint64x2_t a", - "const int 
lane1", - "uint64x2_t b", - "const int lane2" + "svint32_t op", + "svint32_t min", + "svint32_t max" ], "return_type": { - "value": "uint64x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.2D" + "max": { + "register": "Zreg3.S" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "min": { + "register": "Zreg2.S" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op": { + "register": "Zreg1.S" } }, "Architectures": [ @@ -19475,36 +22954,30 @@ ], "instructions": [ [ - "INS" + "SCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_u8", + "SIMD_ISA": "SVE2", + "name": "svclamp[_s64]", "arguments": [ - "uint8x16_t a", - "const int lane1", - "uint8x16_t b", - "const int lane2" + "svint64_t op", + "svint64_t min", + "svint64_t max" ], "return_type": { - "value": "uint8x16_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "max": { + "register": "Zreg3.D" }, - "b": { - "register": "Vn.16B" - }, - "lane1": { - "minimum": 0, - "maximum": 15 + "min": { + "register": "Zreg2.D" }, - "lane2": { - "minimum": 0, - "maximum": 15 + "op": { + "register": "Zreg1.D" } }, "Architectures": [ @@ -19512,72 +22985,92 @@ ], "instructions": [ [ - "INS" + "SCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_f16", + "SIMD_ISA": "SVE2", + "name": "svclamp[_s8]", "arguments": [ - "uint64_t a" + "svint8_t op", + "svint8_t min", + "svint8_t max" ], "return_type": { - "value": "float16x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "max": { + "register": "Zreg3.B" + }, + "min": { + "register": "Zreg2.B" + }, + "op": { + "register": "Zreg1.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "SCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_f32", + "SIMD_ISA": "SVE2", + "name": "svclamp[_u16]", "arguments": [ - "uint64_t a" + "svuint16_t op", + "svuint16_t min", + "svuint16_t max" ], "return_type": { - 
"value": "float32x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "max": { + "register": "Zreg3.H" + }, + "min": { + "register": "Zreg2.H" + }, + "op": { + "register": "Zreg1.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "UCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_f64", + "SIMD_ISA": "SVE2", + "name": "svclamp[_u32]", "arguments": [ - "uint64_t a" + "svuint32_t op", + "svuint32_t min", + "svuint32_t max" ], "return_type": { - "value": "float64x1_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "max": { + "register": "Zreg3.S" + }, + "min": { + "register": "Zreg2.S" + }, + "op": { + "register": "Zreg1.S" } }, "Architectures": [ @@ -19585,394 +23078,537 @@ ], "instructions": [ [ - "INS" + "UCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_p16", + "SIMD_ISA": "SVE2", + "name": "svclamp[_u64]", "arguments": [ - "uint64_t a" + "svuint64_t op", + "svuint64_t min", + "svuint64_t max" ], "return_type": { - "value": "poly16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "max": { + "register": "Zreg3.D" + }, + "min": { + "register": "Zreg2.D" + }, + "op": { + "register": "Zreg1.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "UCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_p64", + "SIMD_ISA": "SVE2", + "name": "svclamp[_u8]", "arguments": [ - "uint64_t a" + "svuint8_t op", + "svuint8_t min", + "svuint8_t max" ], "return_type": { - "value": "poly64x1_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "max": { + "register": "Zreg3.B" + }, + "min": { + "register": "Zreg2.B" + }, + "op": { + "register": "Zreg1.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "INS" + "UCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_p8", + "SIMD_ISA": "SVE", + "name": "svclasta[_f16]", "arguments": [ - "uint64_t 
a" + "svbool_t pg", + "svfloat16_t fallback", + "svfloat16_t data" ], "return_type": { - "value": "poly8x8_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_s16", + "SIMD_ISA": "SVE", + "name": "svclasta[_f32]", "arguments": [ - "uint64_t a" + "svbool_t pg", + "svfloat32_t fallback", + "svfloat32_t data" ], "return_type": { - "value": "int16x4_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_s32", + "SIMD_ISA": "SVE", + "name": "svclasta[_f64]", "arguments": [ - "uint64_t a" + "svbool_t pg", + "svfloat64_t fallback", + "svfloat64_t data" ], "return_type": { - "value": "int32x2_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_s64", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_f16]", "arguments": [ - "uint64_t a" + "svbool_t pg", + "float16_t fallback", + "svfloat16_t data" ], "return_type": { - "value": "int64x1_t" + "value": "float16_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": 
"Htied|Wtied" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_s8", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_f32]", "arguments": [ - "uint64_t a" + "svbool_t pg", + "float32_t fallback", + "svfloat32_t data" ], "return_type": { - "value": "int8x8_t" + "value": "float32_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_u16", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_f64]", "arguments": [ - "uint64_t a" + "svbool_t pg", + "float64_t fallback", + "svfloat64_t data" ], "return_type": { - "value": "uint16x4_t" + "value": "float64_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_u32", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_s16]", "arguments": [ - "uint64_t a" + "svbool_t pg", + "int16_t fallback", + "svint16_t data" ], "return_type": { - "value": "uint32x2_t" + "value": "int16_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Htied|Wtied" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_u64", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_s32]", "arguments": [ - "uint64_t a" + "svbool_t pg", + "int32_t fallback", + 
"svint32_t data" ], "return_type": { - "value": "uint64x1_t" + "value": "int32_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_u8", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_s64]", "arguments": [ - "uint64_t a" + "svbool_t pg", + "int64_t fallback", + "svint64_t data" ], "return_type": { - "value": "uint8x8_t" + "value": "int64_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f16_f32", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_s8]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "int8_t fallback", + "svint8_t data" ], "return_type": { - "value": "float16x4_t" + "value": "int8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Btied|Wtied" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCVTN" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f16_s16", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_u16]", "arguments": [ - "int16x4_t a" + "svbool_t pg", + "uint16_t fallback", + "svuint16_t data" ], "return_type": { - "value": "float16x4_t" + "value": "uint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Htied|Wtied" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SCVTF" + 
"CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f16_u16", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_u32]", "arguments": [ - "uint16x4_t a" + "svbool_t pg", + "uint32_t fallback", + "svuint32_t data" ], "return_type": { - "value": "float16x4_t" + "value": "uint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f32_f16", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_u64]", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "uint64_t fallback", + "svuint64_t data" ], "return_type": { - "value": "float32x4_t" + "value": "uint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCVTL" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f32_f64", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_u8]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "uint8_t fallback", + "svuint8_t data" ], "return_type": { - "value": "float32x2_t" + "value": "uint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Btied|Wtied" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -19980,72 +23616,103 @@ ], "instructions": [ [ - "FCVTN" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f32_s32", + "SIMD_ISA": "SVE", + "name": "svclasta[_s16]", "arguments": [ - "int32x2_t a" + "svbool_t pg", + "svint16_t fallback", + "svint16_t data" ], "return_type": { - "value": "float32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - 
"a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f32_u32", + "SIMD_ISA": "SVE", + "name": "svclasta[_s32]", "arguments": [ - "uint32x2_t a" + "svbool_t pg", + "svint32_t fallback", + "svint32_t data" ], "return_type": { - "value": "float32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f64_f32", + "SIMD_ISA": "SVE", + "name": "svclasta[_s64]", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint64_t fallback", + "svint64_t data" ], "return_type": { - "value": "float64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -20053,22 +23720,34 @@ ], "instructions": [ [ - "FCVTL" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f64_s64", + "SIMD_ISA": "SVE", + "name": "svclasta[_s8]", "arguments": [ - "int64x1_t a" + "svbool_t pg", + "svint8_t fallback", + "svint8_t data" ], "return_type": { - "value": "float64x1_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Zfallback.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -20076,22 +23755,34 @@ ], "instructions": [ [ - "SCVTF" + "CLASTA" + ], + [ + 
"MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f64_u64", + "SIMD_ISA": "SVE", + "name": "svclasta[_u16]", "arguments": [ - "uint64x1_t a" + "svbool_t pg", + "svuint16_t fallback", + "svuint16_t data" ], "return_type": { - "value": "float64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -20099,26 +23790,34 @@ ], "instructions": [ [ - "UCVTF" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_high_f16_f32", + "SIMD_ISA": "SVE", + "name": "svclasta[_u32]", "arguments": [ - "float16x4_t r", - "float32x4_t a" + "svbool_t pg", + "svuint32_t fallback", + "svuint32_t data" ], "return_type": { - "value": "float16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "data": { + "register": "Zdata.S" }, - "r": { - "register": "Vd.4H" + "fallback": { + "register": "Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -20126,22 +23825,34 @@ ], "instructions": [ [ - "FCVTN2" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_high_f32_f16", + "SIMD_ISA": "SVE", + "name": "svclasta[_u64]", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svuint64_t fallback", + "svuint64_t data" ], "return_type": { - "value": "float32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -20149,26 +23860,34 @@ ], "instructions": [ [ - "FCVTL2" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_high_f32_f64", + "SIMD_ISA": "SVE", + "name": "svclasta[_u8]", "arguments": [ - "float32x2_t r", - "float64x2_t 
a" + "svbool_t pg", + "svuint8_t fallback", + "svuint8_t data" ], "return_type": { - "value": "float32x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "data": { + "register": "Zdata.B" }, - "r": { - "register": "Vd.2S" + "fallback": { + "register": "Zfallback.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -20176,22 +23895,34 @@ ], "instructions": [ [ - "FCVTN2" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_high_f64_f32", + "SIMD_ISA": "SVE", + "name": "svclastb[_f16]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svfloat16_t fallback", + "svfloat16_t data" ], "return_type": { - "value": "float64x2_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -20199,145 +23930,172 @@ ], "instructions": [ [ - "FCVTL2" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_f16_s16", + "SIMD_ISA": "SVE", + "name": "svclastb[_f32]", "arguments": [ - "int16x4_t a", - "const int n" + "svbool_t pg", + "svfloat32_t fallback", + "svfloat32_t data" ], "return_type": { - "value": "float16x4_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.S" }, - "n": { - "minimum": 1, - "maximum": 16 + "fallback": { + "register": "Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_f16_u16", + "SIMD_ISA": "SVE", + "name": "svclastb[_f64]", "arguments": [ - "uint16x4_t a", - "const int n" + "svbool_t pg", + "svfloat64_t fallback", + "svfloat64_t data" ], "return_type": { - "value": "float16x4_t" + "value": 
"svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.D" }, - "n": { - "minimum": 1, - "maximum": 16 + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_f32_s32", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_f16]", "arguments": [ - "int32x2_t a", - "const int n" + "svbool_t pg", + "float16_t fallback", + "svfloat16_t data" ], "return_type": { - "value": "float32x2_t" + "value": "float16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.H" }, - "n": { - "minimum": 1, - "maximum": 32 + "fallback": { + "register": "Htied|Wtied" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_f32_u32", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_f32]", "arguments": [ - "uint32x2_t a", - "const int n" + "svbool_t pg", + "float32_t fallback", + "svfloat32_t data" ], "return_type": { - "value": "float32x2_t" + "value": "float32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.S" }, - "n": { - "minimum": 1, - "maximum": 32 + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_f64_s64", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_f64]", "arguments": [ - "int64x1_t a", - "const int n" + "svbool_t pg", + "float64_t fallback", + "svfloat64_t data" ], "return_type": { - "value": "float64x1_t" + "value": "float64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + "register": "Zdata.D" }, - "n": { - 
"minimum": 1, - "maximum": 64 + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -20345,27 +24103,33 @@ ], "instructions": [ [ - "SCVTF" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_f64_u64", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_s16]", "arguments": [ - "uint64x1_t a", - "const int n" + "svbool_t pg", + "int16_t fallback", + "svint16_t data" ], "return_type": { - "value": "float64x1_t" + "value": "int16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + "register": "Zdata.H" }, - "n": { - "minimum": 1, - "maximum": 64 + "fallback": { + "register": "Htied|Wtied" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -20373,86 +24137,101 @@ ], "instructions": [ [ - "UCVTF" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_s16_f16", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_s32]", "arguments": [ - "float16x4_t a", - "const int n" + "svbool_t pg", + "int32_t fallback", + "svint32_t data" ], "return_type": { - "value": "int16x4_t" + "value": "int32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.S" }, - "n": { - "minimum": 1, - "maximum": 16 + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_s32_f32", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_s64]", "arguments": [ - "float32x2_t a", - "const int n" + "svbool_t pg", + "int64_t fallback", + "svint64_t data" ], "return_type": { - "value": "int32x2_t" + "value": "int64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.D" }, - "n": { - "minimum": 1, - "maximum": 32 + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], 
"instructions": [ [ - "FCVTZS" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_s64_f64", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_s8]", "arguments": [ - "float64x1_t a", - "const int n" + "svbool_t pg", + "int8_t fallback", + "svint8_t data" ], "return_type": { - "value": "int64x1_t" + "value": "int8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + "register": "Zdata.B" }, - "n": { - "minimum": 1, - "maximum": 64 + "fallback": { + "register": "Btied|Wtied" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -20460,86 +24239,101 @@ ], "instructions": [ [ - "FCVTZS" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_u16_f16", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_u16]", "arguments": [ - "float16x4_t a", - "const int n" + "svbool_t pg", + "uint16_t fallback", + "svuint16_t data" ], "return_type": { - "value": "uint16x4_t" + "value": "uint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.H" }, - "n": { - "minimum": 1, - "maximum": 16 + "fallback": { + "register": "Htied|Wtied" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZU" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_u32_f32", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_u32]", "arguments": [ - "float32x2_t a", - "const int n" + "svbool_t pg", + "uint32_t fallback", + "svuint32_t data" ], "return_type": { - "value": "uint32x2_t" + "value": "uint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.S" }, - "n": { - "minimum": 1, - "maximum": 32 + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCVTZU" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_u64_f64", + "SIMD_ISA": "SVE", + "name": 
"svclastb[_n_u64]", "arguments": [ - "float64x1_t a", - "const int n" + "svbool_t pg", + "uint64_t fallback", + "svuint64_t data" ], "return_type": { - "value": "uint64x1_t" + "value": "uint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + "register": "Zdata.D" }, - "n": { - "minimum": 1, - "maximum": 64 + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -20547,71 +24341,102 @@ ], "instructions": [ [ - "FCVTZU" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_s16_f16", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_u8]", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "uint8_t fallback", + "svuint8_t data" ], "return_type": { - "value": "int16x4_t" + "value": "uint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Btied|Wtied" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_s32_f32", + "SIMD_ISA": "SVE", + "name": "svclastb[_s16]", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint16_t fallback", + "svint16_t data" ], "return_type": { - "value": "int32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_s64_f64", + "SIMD_ISA": "SVE", + "name": "svclastb[_s32]", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svint32_t fallback", + "svint32_t data" ], "return_type": { - "value": "int64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + 
"register": "Zdata.S" + }, + "fallback": { + "register": "Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -20619,71 +24444,104 @@ ], "instructions": [ [ - "FCVTZS" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_u16_f16", + "SIMD_ISA": "SVE", + "name": "svclastb[_s64]", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svint64_t fallback", + "svint64_t data" ], "return_type": { - "value": "uint16x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_u32_f32", + "SIMD_ISA": "SVE", + "name": "svclastb[_s8]", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint8_t fallback", + "svint8_t data" ], "return_type": { - "value": "uint32x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Zfallback.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCVTZU" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_u64_f64", + "SIMD_ISA": "SVE", + "name": "svclastb[_u16]", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svuint16_t fallback", + "svuint16_t data" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -20691,70 +24549,104 @@ ], "instructions": [ [ - "FCVTZU" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - 
"SIMD_ISA": "Neon", - "name": "vcvta_s16_f16", + "SIMD_ISA": "SVE", + "name": "svclastb[_u32]", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svuint32_t fallback", + "svuint32_t data" ], "return_type": { - "value": "int16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAS" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvta_s32_f32", + "SIMD_ISA": "SVE", + "name": "svclastb[_u64]", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svuint64_t fallback", + "svuint64_t data" ], "return_type": { - "value": "int32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAS" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvta_s64_f64", + "SIMD_ISA": "SVE", + "name": "svclastb[_u8]", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svuint8_t fallback", + "svuint8_t data" ], "return_type": { - "value": "int64x1_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Zfallback.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -20762,70 +24654,96 @@ ], "instructions": [ [ - "FCVTAS" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvta_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcls[_s16]_m", "arguments": [ - "float16x4_t a" + "svuint16_t inactive", + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint16_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAU" + "CLS" + ], + [ + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvta_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcls[_s16]_x", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAU" + "CLS" + ], + [ + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvta_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcls[_s16]_z", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -20833,22 +24751,31 @@ ], "instructions": [ [ - "FCVTAU" + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtad_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcls[_s32]_m", "arguments": [ - "float64_t a" + "svuint32_t inactive", + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "int64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -20856,22 +24783,30 @@ ], "instructions": [ [ - "FCVTAS" + "CLS" + ], + [ + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtad_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcls[_s32]_x", "arguments": [ - "float64_t a" + "svbool_t pg", + "svint32_t op" ], 
"return_type": { - "value": "uint64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -20879,22 +24814,30 @@ ], "instructions": [ [ - "FCVTAU" + "CLS" + ], + [ + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtah_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcls[_s32]_z", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "int16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -20902,46 +24845,62 @@ ], "instructions": [ [ - "FCVTAS" + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtah_s32_f16", + "SIMD_ISA": "SVE", + "name": "svcls[_s64]_m", "arguments": [ - "float16_t a" + "svuint64_t inactive", + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "int32_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAS" + "CLS" + ], + [ + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtah_s64_f16", + "SIMD_ISA": "SVE", + "name": "svcls[_s64]_x", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "int64_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -20949,22 +24908,30 @@ ], "instructions": [ [ - "FCVTAS" + "CLS" + ], + [ + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtah_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcls[_s64]_z", "arguments": [ - "float16_t a" + "svbool_t pg", + 
"svint64_t op" ], "return_type": { - "value": "uint16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -20972,46 +24939,62 @@ ], "instructions": [ [ - "FCVTAU" + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtah_u32_f16", + "SIMD_ISA": "SVE", + "name": "svcls[_s8]_m", "arguments": [ - "float16_t a" + "svuint8_t inactive", + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "uint32_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAU" + "CLS" + ], + [ + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtah_u64_f16", + "SIMD_ISA": "SVE", + "name": "svcls[_s8]_x", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "uint64_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -21019,70 +25002,93 @@ ], "instructions": [ [ - "FCVTAU" + "CLS" + ], + [ + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtaq_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcls[_s8]_z", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "int16x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAS" + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtaq_s32_f32", + "SIMD_ISA": "SVE", + "name": "svclz[_s16]_m", "arguments": [ - "float32x4_t a" + "svuint16_t inactive", + "svbool_t pg", 
+ "svint16_t op" ], "return_type": { - "value": "int32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAS" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtaq_s64_f64", + "SIMD_ISA": "SVE", + "name": "svclz[_s16]_x", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "int64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -21090,70 +25096,93 @@ ], "instructions": [ [ - "FCVTAS" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtaq_u16_f16", + "SIMD_ISA": "SVE", + "name": "svclz[_s16]_z", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAU" + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtaq_u32_f32", + "SIMD_ISA": "SVE", + "name": "svclz[_s32]_m", "arguments": [ - "float32x4_t a" + "svuint32_t inactive", + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAU" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtaq_u64_f64", + "SIMD_ISA": "SVE", + "name": 
"svclz[_s32]_x", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -21161,22 +25190,30 @@ ], "instructions": [ [ - "FCVTAU" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtas_s32_f32", + "SIMD_ISA": "SVE", + "name": "svclz[_s32]_z", "arguments": [ - "float32_t a" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "int32_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -21184,22 +25221,31 @@ ], "instructions": [ [ - "FCVTAS" + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtas_u32_f32", + "SIMD_ISA": "SVE", + "name": "svclz[_s64]_m", "arguments": [ - "float32_t a" + "svuint64_t inactive", + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "uint32_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -21207,22 +25253,30 @@ ], "instructions": [ [ - "FCVTAU" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtd_f64_s64", + "SIMD_ISA": "SVE", + "name": "svclz[_s64]_x", "arguments": [ - "int64_t a" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "float64_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -21230,22 +25284,30 @@ ], "instructions": [ [ - "SCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtd_f64_u64", + 
"SIMD_ISA": "SVE", + "name": "svclz[_s64]_z", "arguments": [ - "uint64_t a" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "float64_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -21253,27 +25315,31 @@ ], "instructions": [ [ - "UCVTF" + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtd_n_f64_s64", + "SIMD_ISA": "SVE", + "name": "svclz[_s8]_m", "arguments": [ - "int64_t a", - "const int n" + "svuint8_t inactive", + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "float64_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "inactive": { + "register": "Zinactive.B|Ztied.B" }, - "n": { - "minimum": 1, - "maximum": 64 + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -21281,27 +25347,30 @@ ], "instructions": [ [ - "SCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtd_n_f64_u64", + "SIMD_ISA": "SVE", + "name": "svclz[_s8]_x", "arguments": [ - "uint64_t a", - "const int n" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "float64_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.B|Ztied.B" }, - "n": { - "minimum": 1, - "maximum": 64 + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -21309,27 +25378,30 @@ ], "instructions": [ [ - "UCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtd_n_s64_f64", + "SIMD_ISA": "SVE", + "name": "svclz[_s8]_z", "arguments": [ - "float64_t a", - "const int n" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "int64_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.B" }, - "n": { - "minimum": 1, - "maximum": 64 + "pg": { + "register": "Pg.B" } }, 
"Architectures": [ @@ -21337,27 +25409,31 @@ ], "instructions": [ [ - "FCVTZS" + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtd_n_u64_f64", + "SIMD_ISA": "SVE", + "name": "svclz[_u16]_m", "arguments": [ - "float64_t a", - "const int n" + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "uint64_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "inactive": { + "register": "Zinactive.H|Ztied.H" }, - "n": { - "minimum": 1, - "maximum": 64 + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -21365,22 +25441,30 @@ ], "instructions": [ [ - "FCVTZU" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtd_s64_f64", + "SIMD_ISA": "SVE", + "name": "svclz[_u16]_x", "arguments": [ - "float64_t a" + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "int64_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -21388,22 +25472,30 @@ ], "instructions": [ [ - "FCVTZS" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtd_u64_f64", + "SIMD_ISA": "SVE", + "name": "svclz[_u16]_z", "arguments": [ - "float64_t a" + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "uint64_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -21411,22 +25503,31 @@ ], "instructions": [ [ - "FCVTZU" + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_f16_s16", + "SIMD_ISA": "SVE", + "name": "svclz[_u32]_m", "arguments": [ - "int16_t a" + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "float16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - 
"register": "Hn" + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -21434,46 +25535,61 @@ ], "instructions": [ [ - "SCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_f16_s32", + "SIMD_ISA": "SVE", + "name": "svclz[_u32]_x", "arguments": [ - "int32_t a" + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "float16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_f16_s64", + "SIMD_ISA": "SVE", + "name": "svclz[_u32]_z", "arguments": [ - "int64_t a" + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "float16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -21481,22 +25597,31 @@ ], "instructions": [ [ - "SCVTF" + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_f16_u16", + "SIMD_ISA": "SVE", + "name": "svclz[_u64]_m", "arguments": [ - "uint16_t a" + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "float16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -21504,46 +25629,61 @@ ], "instructions": [ [ - "UCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_f16_u32", + "SIMD_ISA": "SVE", + "name": "svclz[_u64]_x", "arguments": [ - "uint32_t a" + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "float16_t" + 
"value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_f16_u64", + "SIMD_ISA": "SVE", + "name": "svclz[_u64]_z", "arguments": [ - "uint64_t a" + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "float16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -21551,27 +25691,31 @@ ], "instructions": [ [ - "UCVTF" + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_f16_s16", + "SIMD_ISA": "SVE", + "name": "svclz[_u8]_m", "arguments": [ - "int16_t a", - "const int n" + "svuint8_t inactive", + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "float16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "inactive": { + "register": "Zinactive.B|Ztied.B" }, - "n": { - "minimum": 1, - "maximum": 16 + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -21579,56 +25723,61 @@ ], "instructions": [ [ - "SCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_f16_s32", + "SIMD_ISA": "SVE", + "name": "svclz[_u8]_x", "arguments": [ - "int32_t a", - "const int n" + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "float16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.B|Ztied.B" }, - "n": { - "minimum": 1, - "maximum": 16 + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_f16_s64", + "SIMD_ISA": "SVE", + "name": "svclz[_u8]_z", 
"arguments": [ - "int64_t a", - "const int n" + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "float16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.B" }, - "n": { - "minimum": 1, - "maximum": 16 + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -21636,27 +25785,36 @@ ], "instructions": [ [ - "SCVTF" + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_f16_u16", + "SIMD_ISA": "SVE", + "name": "svcmla[_f16]_m", "arguments": [ - "uint16_t a", - "const int n" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "float16_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -21664,56 +25822,79 @@ ], "instructions": [ [ - "UCVTF" + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_f16_u32", + "SIMD_ISA": "SVE", + "name": "svcmla[_f16]_x", "arguments": [ - "uint32_t a", - "const int n" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "float16_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_f16_u64", + "SIMD_ISA": "SVE", + "name": "svcmla[_f16]_z", "arguments": [ - "uint64_t a", - "const int n" + "svbool_t pg", + 
"svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "float16_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -21721,27 +25902,36 @@ ], "instructions": [ [ - "UCVTF" + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmla[_f32]_m", "arguments": [ - "float16_t a", - "const int n" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "int16_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -21749,56 +25939,79 @@ ], "instructions": [ [ - "FCVTZS" + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_s32_f16", + "SIMD_ISA": "SVE", + "name": "svcmla[_f32]_x", "arguments": [ - "float16_t a", - "const int n" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "int32_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_s64_f16", + "SIMD_ISA": "SVE", + 
"name": "svcmla[_f32]_z", "arguments": [ - "float16_t a", - "const int n" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "int64_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -21806,27 +26019,36 @@ ], "instructions": [ [ - "FCVTZS" + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmla[_f64]_m", "arguments": [ - "float16_t a", - "const int n" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint16_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -21834,56 +26056,79 @@ ], "instructions": [ [ - "FCVTZU" + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_u32_f16", + "SIMD_ISA": "SVE", + "name": "svcmla[_f64]_x", "arguments": [ - "float16_t a", - "const int n" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint32_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZU" + "FCMLA" + ], + [ + "MOVPRFX", + 
"FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_u64_f16", + "SIMD_ISA": "SVE", + "name": "svcmla[_f64]_z", "arguments": [ - "float16_t a", - "const int n" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint64_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -21891,22 +26136,32 @@ ], "instructions": [ [ - "FCVTZU" + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_s16_f16", + "SIMD_ISA": "SVE2", + "name": "svcmla[_s16]", "arguments": [ - "float16_t a" + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "int16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -21914,46 +26169,71 @@ ], "instructions": [ [ - "FCVTZS" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_s32_f16", + "SIMD_ISA": "SVE2", + "name": "svcmla[_s32]", "arguments": [ - "float16_t a" + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "int32_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_s64_f16", + "SIMD_ISA": "SVE2", + "name": "svcmla[_s64]", "arguments": [ - "float16_t a" + 
"svint64_t op1", + "svint64_t op2", + "svint64_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "int64_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" } }, "Architectures": [ @@ -21961,22 +26241,35 @@ ], "instructions": [ [ - "FCVTZS" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_u16_f16", + "SIMD_ISA": "SVE2", + "name": "svcmla[_s8]", "arguments": [ - "float16_t a" + "svint8_t op1", + "svint8_t op2", + "svint8_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" } }, "Architectures": [ @@ -21984,46 +26277,71 @@ ], "instructions": [ [ - "FCVTZU" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_u32_f16", + "SIMD_ISA": "SVE2", + "name": "svcmla[_u16]", "arguments": [ - "float16_t a" + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint32_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZU" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_u64_f16", + "SIMD_ISA": "SVE2", + "name": "svcmla[_u32]", "arguments": [ - "float16_t a" + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": 
"Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ -22031,70 +26349,112 @@ ], "instructions": [ [ - "FCVTZU" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtm_s16_f16", + "SIMD_ISA": "SVE2", + "name": "svcmla[_u64]", "arguments": [ - "float16x4_t a" + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "int16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMS" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtm_s32_f32", + "SIMD_ISA": "SVE2", + "name": "svcmla[_u8]", "arguments": [ - "float32x2_t a" + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "int32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMS" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtm_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmla_lane[_f16]", "arguments": [ - "float64x1_t a" + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" ], "return_type": { - "value": "int64x1_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -22102,70 +26462,122 @@ ], 
"instructions": [ [ - "FCVTMS" + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtm_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmla_lane[_f32]", "arguments": [ - "float16x4_t a" + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint16x4_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMU" + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtm_u32_f32", + "SIMD_ISA": "SVE2", + "name": "svcmla_lane[_s16]", "arguments": [ - "float32x2_t a" + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMU" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtm_u64_f64", + "SIMD_ISA": "SVE2", + "name": "svcmla_lane[_s32]", "arguments": [ - "float64x1_t a" + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ 
-22173,22 +26585,40 @@ ], "instructions": [ [ - "FCVTMU" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmd_s64_f64", + "SIMD_ISA": "SVE2", + "name": "svcmla_lane[_u16]", "arguments": [ - "float64_t a" + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" ], "return_type": { - "value": "int64_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -22196,22 +26626,40 @@ ], "instructions": [ [ - "FCVTMS" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmd_u64_f64", + "SIMD_ISA": "SVE2", + "name": "svcmla_lane[_u32]", "arguments": [ - "float64_t a" + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ -22219,22 +26667,34 @@ ], "instructions": [ [ - "FCVTMU" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmh_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_f16]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -22242,46 +26702,61 @@ ], "instructions": [ [ - "FCVTMS" + "FCMEQ" ] ] }, { - "SIMD_ISA": "Neon", - 
"name": "vcvtmh_s32_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_f32]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMS" + "FCMEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmh_s64_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_f64]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -22289,22 +26764,30 @@ ], "instructions": [ [ - "FCVTMS" + "FCMEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmh_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_f16]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -22312,46 +26795,67 @@ ], "instructions": [ [ - "FCVTMU" + "FCMEQ" + ], + [ + "FCMEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmh_u32_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_f32]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - 
"A32", "A64" ], "instructions": [ [ - "FCVTMU" + "FCMEQ" + ], + [ + "FCMEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmh_u64_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_f64]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -22359,70 +26863,101 @@ ], "instructions": [ [ - "FCVTMU" + "FCMEQ" + ], + [ + "FCMEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmq_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_s16]", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMS" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmq_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_s32]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMS" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmq_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_s64]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": 
"Vn.2D" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -22430,70 +26965,101 @@ ], "instructions": [ [ - "FCVTMS" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmq_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_s8]", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMU" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmq_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_u16]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMU" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmq_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_u32]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -22501,22 +27067,33 @@ ], "instructions": [ [ - "FCVTMU" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtms_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_u64]", "arguments": [ - "float32_t a" + 
"svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -22524,22 +27101,33 @@ ], "instructions": [ [ - "FCVTMS" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtms_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_u8]", "arguments": [ - "float32_t a" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -22547,70 +27135,95 @@ ], "instructions": [ [ - "FCVTMU" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtn_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_s16]", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNS" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtn_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_s32]", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNS" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": 
"vcvtn_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_s64]", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -22618,70 +27231,92 @@ ], "instructions": [ [ - "FCVTNS" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtn_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_s8]", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNU" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtn_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_u16]", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNU" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtn_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_u32]", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -22689,22 +27324,30 @@ ], "instructions": [ 
[ - "FCVTNU" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnd_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_u64]", "arguments": [ - "float64_t a" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -22712,22 +27355,30 @@ ], "instructions": [ [ - "FCVTNS" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnd_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_u8]", "arguments": [ - "float64_t a" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -22735,22 +27386,30 @@ ], "instructions": [ [ - "FCVTNU" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnh_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_n_s16]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint16_t op1", + "int64_t op2" ], "return_type": { - "value": "int16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -22758,46 +27417,67 @@ ], "instructions": [ [ - "FCVTNS" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnh_s32_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_n_s32]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint32_t op1", + "int64_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, 
+ "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNS" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnh_s64_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_n_s8]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint8_t op1", + "int64_t op2" ], "return_type": { - "value": "int64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -22805,22 +27485,33 @@ ], "instructions": [ [ - "FCVTNS" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnh_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_s16]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -22828,46 +27519,61 @@ ], "instructions": [ [ - "FCVTNU" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnh_u32_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_s32]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNU" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnh_u64_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_s8]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - 
"a": { - "register": "Hn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -22875,70 +27581,92 @@ ], "instructions": [ [ - "FCVTNU" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnq_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_f16]", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNS" + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnq_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge[_f32]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNS" + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnq_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpge[_f64]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -22946,70 +27674,98 @@ ], "instructions": [ [ - "FCVTNS" + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnq_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_f16]", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], 
"return_type": { - "value": "uint16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNU" + "FCMGE" + ], + [ + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnq_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_f32]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNU" + "FCMGE" + ], + [ + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnq_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_f64]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -23017,22 +27773,33 @@ ], "instructions": [ [ - "FCVTNU" + "FCMGE" + ], + [ + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtns_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_s16]", "arguments": [ - "float32_t a" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -23040,22 +27807,33 @@ ], "instructions": [ [ - "FCVTNS" + "CMPGE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - 
"name": "vcvtns_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_s32]", "arguments": [ - "float32_t a" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -23063,70 +27841,101 @@ ], "instructions": [ [ - "FCVTNU" + "CMPGE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtp_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_s64]", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPS" + "CMPGE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtp_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_s8]", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPS" + "CMPGE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtp_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_u16]", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" 
} }, "Architectures": [ @@ -23134,70 +27943,101 @@ ], "instructions": [ [ - "FCVTPS" + "CMPHS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtp_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_u32]", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPU" + "CMPHS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtp_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_u64]", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPU" + "CMPHS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtp_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_u8]", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -23205,22 +28045,33 @@ ], "instructions": [ [ - "FCVTPU" + "CMPHS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtpd_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpge[_s16]", "arguments": [ - "float64_t a" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "int64_t" + "value": "svbool_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -23228,22 +28079,30 @@ ], "instructions": [ [ - "FCVTPS" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtpd_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpge[_s32]", "arguments": [ - "float64_t a" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -23251,22 +28110,30 @@ ], "instructions": [ [ - "FCVTPU" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtph_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_s64]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "int16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -23274,46 +28141,61 @@ ], "instructions": [ [ - "FCVTPS" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtph_s32_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_s8]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPS" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtph_s64_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_u16]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], 
"return_type": { - "value": "int64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -23321,22 +28203,30 @@ ], "instructions": [ [ - "FCVTPS" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtph_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_u32]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -23344,46 +28234,61 @@ ], "instructions": [ [ - "FCVTPU" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtph_u32_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_u64]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPU" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtph_u64_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_u8]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -23391,70 +28296,98 @@ ], "instructions": [ [ - "FCVTPU" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtpq_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_s16]", "arguments": [ - 
"float16x8_t a" + "svbool_t pg", + "svint16_t op1", + "int64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPS" + "CMPGE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtpq_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_s32]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svint32_t op1", + "int64_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPS" + "CMPGE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtpq_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_s8]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svint8_t op1", + "int64_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -23462,70 +28395,101 @@ ], "instructions": [ [ - "FCVTPS" + "CMPGE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtpq_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_u16]", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - 
"FCVTPU" + "CMPHS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtpq_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_u32]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPU" + "CMPHS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtpq_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_u8]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -23533,22 +28497,33 @@ ], "instructions": [ [ - "FCVTPU" + "CMPHS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtps_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_s16]", "arguments": [ - "float32_t a" + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -23556,22 +28531,30 @@ ], "instructions": [ [ - "FCVTPS" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtps_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_s32]", "arguments": [ - "float32_t a" + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": 
"Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -23579,120 +28562,154 @@ ], "instructions": [ [ - "FCVTPU" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_f16_s16", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_s8]", "arguments": [ - "int16x8_t a" + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_f16_u16", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_u16]", "arguments": [ - "uint16x8_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_f32_s32", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_u32]", "arguments": [ - "int32x4_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_f32_u32", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_u8]", "arguments": [ - "uint32x4_t a" + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" 
}, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_f64_s64", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_f16]", "arguments": [ - "int64x2_t a" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -23700,22 +28717,30 @@ ], "instructions": [ [ - "SCVTF" + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_f64_u64", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_f32]", "arguments": [ - "uint64x2_t a" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -23723,145 +28748,163 @@ ], "instructions": [ [ - "UCVTF" + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_f16_s16", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_f64]", "arguments": [ - "int16x8_t a", - "const int n" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_f16_u16", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_f16]", 
"arguments": [ - "uint16x8_t a", - "const int n" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "FCMGT" + ], + [ + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_f32_s32", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_f32]", "arguments": [ - "int32x4_t a", - "const int n" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" }, - "n": { - "minimum": 1, - "maximum": 32 + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "FCMGT" + ], + [ + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_f32_u32", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_f64]", "arguments": [ - "uint32x4_t a", - "const int n" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" }, - "n": { - "minimum": 1, - "maximum": 32 + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "FCMGT" + ], + [ + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_f64_s64", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_s16]", "arguments": [ - "int64x2_t a", - "const int n" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - 
"a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" }, - "n": { - "minimum": 1, - "maximum": 64 + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -23869,27 +28912,33 @@ ], "instructions": [ [ - "SCVTF" + "CMPGT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_f64_u64", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_s32]", "arguments": [ - "uint64x2_t a", - "const int n" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" }, - "n": { - "minimum": 1, - "maximum": 64 + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -23897,86 +28946,101 @@ ], "instructions": [ [ - "UCVTF" + "CMPGT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_s64]", "arguments": [ - "float16x8_t a", - "const int n" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CMPGT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_s8]", "arguments": [ - "float32x4_t a", - "const int n" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.B" }, - "n": { - "minimum": 1, - "maximum": 32 + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - 
"v7", - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CMPGT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_u16]", "arguments": [ - "float64x2_t a", - "const int n" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" }, - "n": { - "minimum": 1, - "maximum": 64 + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -23984,86 +29048,101 @@ ], "instructions": [ [ - "FCVTZS" + "CMPHI" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_u32]", "arguments": [ - "float16x8_t a", - "const int n" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZU" + "CMPHI" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_u64]", "arguments": [ - "float32x4_t a", - "const int n" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" }, - "n": { - "minimum": 1, - "maximum": 32 + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCVTZU" + "CMPHI" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_u8]", 
"arguments": [ - "float64x2_t a", - "const int n" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B" }, - "n": { - "minimum": 1, - "maximum": 64 + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -24071,71 +29150,95 @@ ], "instructions": [ [ - "FCVTZU" + "CMPHI" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_s16]", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_s32]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_s64]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -24143,71 +29246,92 
@@ ], "instructions": [ [ - "FCVTZS" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_s8]", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_u16]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCVTZU" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_u32]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -24215,22 +29339,30 @@ ], "instructions": [ [ - "FCVTZU" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvts_f32_s32", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_u64]", "arguments": [ - "int32_t a" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + 
"pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -24238,22 +29370,30 @@ ], "instructions": [ [ - "SCVTF" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvts_f32_u32", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_u8]", "arguments": [ - "uint32_t a" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "float32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -24261,27 +29401,30 @@ ], "instructions": [ [ - "UCVTF" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvts_n_f32_s32", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_s16]", "arguments": [ - "int32_t a", - "const int n" + "svbool_t pg", + "svint16_t op1", + "int64_t op2" ], "return_type": { - "value": "float32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.H" }, - "n": { - "minimum": 1, - "maximum": 32 + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -24289,27 +29432,33 @@ ], "instructions": [ [ - "SCVTF" + "CMPGT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvts_n_f32_u32", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_s32]", "arguments": [ - "uint32_t a", - "const int n" + "svbool_t pg", + "svint32_t op1", + "int64_t op2" ], "return_type": { - "value": "float32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.S" }, - "n": { - "minimum": 1, - "maximum": 32 + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -24317,27 +29466,33 @@ ], "instructions": [ [ - "UCVTF" + "CMPGT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvts_n_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_s8]", "arguments": [ - "float32_t a", - "const int n" + 
"svbool_t pg", + "svint8_t op1", + "int64_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.B" }, - "n": { - "minimum": 1, - "maximum": 32 + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -24345,27 +29500,33 @@ ], "instructions": [ [ - "FCVTZS" + "CMPGT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvts_n_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_u16]", "arguments": [ - "float32_t a", - "const int n" + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.H" }, - "n": { - "minimum": 1, - "maximum": 32 + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -24373,22 +29534,33 @@ ], "instructions": [ [ - "FCVTZU" + "CMPHI" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvts_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_u32]", "arguments": [ - "float32_t a" + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -24396,22 +29568,33 @@ ], "instructions": [ [ - "FCVTZS" + "CMPHI" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvts_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_u8]", "arguments": [ - "float32_t a" + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + 
"register": "Pg.B" } }, "Architectures": [ @@ -24419,22 +29602,33 @@ ], "instructions": [ [ - "FCVTZU" + "CMPHI" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtx_f32_f64", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_s16]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -24442,26 +29636,30 @@ ], "instructions": [ [ - "FCVTXN" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtx_high_f32_f64", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_s32]", "arguments": [ - "float32x2_t r", - "float64x2_t a" + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" }, - "r": { - "register": "Vd.2S" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -24469,22 +29667,30 @@ ], "instructions": [ [ - "FCVTXN2" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtxd_f32_f64", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_s8]", "arguments": [ - "float64_t a" + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" ], "return_type": { - "value": "float32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -24492,26 +29698,30 @@ ], "instructions": [ [ - "FCVTXN" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdiv_f16", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_u16]", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" ], "return_type": { 
- "value": "float16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -24519,26 +29729,30 @@ ], "instructions": [ [ - "FDIV" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdiv_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_u32]", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -24546,26 +29760,30 @@ ], "instructions": [ [ - "FDIV" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdiv_f64", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_u8]", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -24573,54 +29791,61 @@ ], "instructions": [ [ - "FDIV" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdivh_f16", + "SIMD_ISA": "SVE", + "name": "svcmple[_f16]", "arguments": [ - "float16_t a", - "float16_t b" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "float16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ 
[ - "FDIV" + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdivq_f16", + "SIMD_ISA": "SVE", + "name": "svcmple[_f32]", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -24628,26 +29853,30 @@ ], "instructions": [ [ - "FDIV" + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdivq_f32", + "SIMD_ISA": "SVE", + "name": "svcmple[_f64]", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -24655,26 +29884,30 @@ ], "instructions": [ [ - "FDIV" + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdivq_f64", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_f16]", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -24682,109 +29915,101 @@ ], "instructions": [ [ - "FDIV" + "FCMLE" + ], + [ + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdot_lane_s32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_f32]", "arguments": [ - "int32x2_t r", - "int8x8_t a", - "int8x8_t b", - "const int lane" + "svbool_t pg", + "svfloat32_t op1", + "float32_t 
op2" ], "return_type": { - "value": "int32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.4B" + "op1": { + "register": "Zop1.S" }, - "lane": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.S[*]" }, - "r": { - "register": "Vd.2S" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SDOT" + "FCMLE" + ], + [ + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdot_lane_u32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_f64]", "arguments": [ - "uint32x2_t r", - "uint8x8_t a", - "uint8x8_t b", - "const int lane" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4B" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.D[*]" }, - "r": { - "register": "Vd.2S" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UDOT" + "FCMLE" + ], + [ + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdot_laneq_s32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_s16]", "arguments": [ - "int32x2_t r", - "int8x8_t a", - "int8x16_t b", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.4B" + "op1": { + "register": "Zop1.H" }, - "lane": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.H[*]" }, - "r": { - "register": "Vd.2S" + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -24792,35 +30017,33 @@ ], "instructions": [ [ - "SDOT" + "CMPLE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdot_laneq_u32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_s32]", "arguments": [ - "uint32x2_t r", - "uint8x8_t 
a", - "uint8x16_t b", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.4B" + "op1": { + "register": "Zop1.S" }, - "lane": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.S[*]" }, - "r": { - "register": "Vd.2S" + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -24828,173 +30051,169 @@ ], "instructions": [ [ - "UDOT" + "CMPLE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdot_s32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_s64]", "arguments": [ - "int32x2_t r", - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.D[*]" }, - "r": { - "register": "Vd.2S" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SDOT" + "CMPLE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdot_u32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_s8]", "arguments": [ - "uint32x2_t r", - "uint8x8_t a", - "uint8x8_t b" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B[*]" }, - "r": { - "register": "Vd.2S" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UDOT" + "CMPLE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdotq_lane_s32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_u16]", "arguments": [ - "int32x4_t r", - "int8x16_t a", - "int8x8_t b", - "const int lane" + "svbool_t pg", + 
"svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.4B" + "op1": { + "register": "Zop1.H" }, - "lane": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.H[*]" }, - "r": { - "register": "Vd.4S" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SDOT" + "CMPLS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdotq_lane_u32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_u32]", "arguments": [ - "uint32x4_t r", - "uint8x16_t a", - "uint8x8_t b", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.4B" + "op1": { + "register": "Zop1.S" }, - "lane": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.S[*]" }, - "r": { - "register": "Vd.4S" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UDOT" + "CMPLS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdotq_laneq_s32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_u64]", "arguments": [ - "int32x4_t r", - "int8x16_t a", - "int8x16_t b", - "const int lane" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.4B" + "op1": { + "register": "Zop1.D" }, - "lane": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.D[*]" }, - "r": { - "register": "Vd.4S" + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -25002,35 +30221,33 @@ ], "instructions": [ [ - "SDOT" + "CMPLS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdotq_laneq_u32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_u8]", 
"arguments": [ - "uint32x4_t r", - "uint8x16_t a", - "uint8x16_t b", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.4B" + "op1": { + "register": "Zop1.B" }, - "lane": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.B[*]" }, - "r": { - "register": "Vd.4S" + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -25038,151 +30255,157 @@ ], "instructions": [ [ - "UDOT" + "CMPLS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdotq_s32", + "SIMD_ISA": "SVE", + "name": "svcmple[_s16]", "arguments": [ - "int32x4_t r", - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H" }, - "r": { - "register": "Vd.4S" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SDOT" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdotq_u32", + "SIMD_ISA": "SVE", + "name": "svcmple[_s32]", "arguments": [ - "uint32x4_t r", - "uint8x16_t a", - "uint8x16_t b" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S" }, - "r": { - "register": "Vd.4S" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UDOT" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_f16", + "SIMD_ISA": "SVE", + "name": "svcmple[_s64]", "arguments": [ - "float16x4_t vec", - "const int lane" + "svbool_t pg", + "svint64_t op1", + 
"svint64_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_f32", + "SIMD_ISA": "SVE", + "name": "svcmple[_s8]", "arguments": [ - "float32x2_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_f64", + "SIMD_ISA": "SVE", + "name": "svcmple[_u16]", "arguments": [ - "float64x1_t vec", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "float64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -25190,356 +30413,389 @@ ], "instructions": [ [ - "DUP" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_p16", + "SIMD_ISA": "SVE", + "name": "svcmple[_u32]", "arguments": [ - "poly16x4_t vec", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "poly16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.4H" + "op2": { + 
"register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_p64", + "SIMD_ISA": "SVE", + "name": "svcmple[_u64]", "arguments": [ - "poly64x1_t vec", - "const int lane" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "poly64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_p8", + "SIMD_ISA": "SVE", + "name": "svcmple[_u8]", "arguments": [ - "poly8x8_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "poly8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_s16", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_n_s16]", "arguments": [ - "int16x4_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "int64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLE" + ], + [ + "CMPLE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_s32", + "SIMD_ISA": "SVE", + "name": 
"svcmple_wide[_n_s32]", "arguments": [ - "int32x2_t vec", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "int64_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLE" + ], + [ + "CMPLE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_s64", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_n_s8]", "arguments": [ - "int64x1_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "int64_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLE" + ], + [ + "CMPLE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_s8", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_n_u16]", "arguments": [ - "int8x8_t vec", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLS" + ], + [ + "CMPLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_u16", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_n_u32]", "arguments": [ - "uint16x4_t vec", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint16x4_t" + 
"value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLS" + ], + [ + "CMPLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_u32", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_n_u8]", "arguments": [ - "uint32x2_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLS" + ], + [ + "CMPLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_u64", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_s16]", "arguments": [ - "uint64x1_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_u8", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_s32]", "arguments": [ - "uint8x8_t vec", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D" + 
}, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_s8]", "arguments": [ - "float16x8_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -25547,27 +30803,30 @@ ], "instructions": [ [ - "DUP" + "CMPLE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_u16]", "arguments": [ - "float32x4_t vec", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -25575,27 +30834,30 @@ ], "instructions": [ [ - "DUP" + "CMPLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_f64", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_u32]", "arguments": [ - "float64x2_t vec", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -25603,27 +30865,30 @@ ], "instructions": [ [ - "DUP" + "CMPLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_p16", + "SIMD_ISA": "SVE", + "name": 
"svcmple_wide[_u8]", "arguments": [ - "poly16x8_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" ], "return_type": { - "value": "poly16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -25631,27 +30896,30 @@ ], "instructions": [ [ - "DUP" + "CMPLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_p64", + "SIMD_ISA": "SVE", + "name": "svcmplt[_f16]", "arguments": [ - "poly64x2_t vec", - "const int lane" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "poly64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -25659,27 +30927,30 @@ ], "instructions": [ [ - "DUP" + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_p8", + "SIMD_ISA": "SVE", + "name": "svcmplt[_f32]", "arguments": [ - "poly8x16_t vec", - "const int lane" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "poly8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -25687,27 +30958,30 @@ ], "instructions": [ [ - "DUP" + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_s16", + "SIMD_ISA": "SVE", + "name": "svcmplt[_f64]", "arguments": [ - "int16x8_t vec", - "const int lane" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - 
"lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -25715,27 +30989,30 @@ ], "instructions": [ [ - "DUP" + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_s32", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_f16]", "arguments": [ - "int32x4_t vec", - "const int lane" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -25743,27 +31020,33 @@ ], "instructions": [ [ - "DUP" + "FCMLT" + ], + [ + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_s64", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_f32]", "arguments": [ - "int64x2_t vec", - "const int lane" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -25771,27 +31054,33 @@ ], "instructions": [ [ - "DUP" + "FCMLT" + ], + [ + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_s8", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_f64]", "arguments": [ - "int8x16_t vec", - "const int lane" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, 
"Architectures": [ @@ -25799,27 +31088,33 @@ ], "instructions": [ [ - "DUP" + "FCMLT" + ], + [ + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_u16", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_s16]", "arguments": [ - "uint16x8_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -25827,27 +31122,33 @@ ], "instructions": [ [ - "DUP" + "CMPLT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_u32", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_s32]", "arguments": [ - "uint32x4_t vec", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -25855,27 +31156,33 @@ ], "instructions": [ [ - "DUP" + "CMPLT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_u64", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_s64]", "arguments": [ - "uint64x2_t vec", - "const int lane" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -25883,27 +31190,33 @@ ], "instructions": [ [ - "DUP" + "CMPLT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_u8", + "SIMD_ISA": 
"SVE", + "name": "svcmplt[_n_s8]", "arguments": [ - "uint8x16_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -25911,72 +31224,101 @@ ], "instructions": [ [ - "DUP" + "CMPLT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_f16", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_u16]", "arguments": [ - "float16_t value" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLO" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_f32", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_u32]", "arguments": [ - "float32_t value" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLO" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_f64", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_u64]", "arguments": [ - "float64_t value" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "float64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + 
"register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -25984,301 +31326,383 @@ ], "instructions": [ [ - "INS" + "CMPLO" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_p16", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_u8]", "arguments": [ - "poly16_t value" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "poly16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLO" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_p64", + "SIMD_ISA": "SVE", + "name": "svcmplt[_s16]", "arguments": [ - "poly64_t value" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "poly64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "INS" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_p8", + "SIMD_ISA": "SVE", + "name": "svcmplt[_s32]", "arguments": [ - "poly8_t value" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "poly8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_s16", + "SIMD_ISA": "SVE", + "name": "svcmplt[_s64]", "arguments": [ - "int16_t value" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svbool_t" }, 
"Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_s32", + "SIMD_ISA": "SVE", + "name": "svcmplt[_s8]", "arguments": [ - "int32_t value" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_s64", + "SIMD_ISA": "SVE", + "name": "svcmplt[_u16]", "arguments": [ - "int64_t value" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_s8", + "SIMD_ISA": "SVE", + "name": "svcmplt[_u32]", "arguments": [ - "int8_t value" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_u16", + "SIMD_ISA": "SVE", + "name": "svcmplt[_u64]", "arguments": [ - "uint16_t value" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], 
"return_type": { - "value": "uint16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_u32", + "SIMD_ISA": "SVE", + "name": "svcmplt[_u8]", "arguments": [ - "uint32_t value" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_u64", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_n_s16]", "arguments": [ - "uint64_t value" + "svbool_t pg", + "svint16_t op1", + "int64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CMPLT" + ], + [ + "CMPLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_u8", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_n_s32]", "arguments": [ - "uint8_t value" + "svbool_t pg", + "svint32_t op1", + "int64_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLT" + ], + [ + "CMPLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupb_lane_p8", + "SIMD_ISA": "SVE", 
+ "name": "svcmplt_wide[_n_s8]", "arguments": [ - "poly8x8_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "int64_t op2" ], "return_type": { - "value": "poly8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -26286,27 +31710,33 @@ ], "instructions": [ [ - "DUP" + "CMPLT" + ], + [ + "CMPLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupb_lane_s8", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_n_u16]", "arguments": [ - "int8x8_t vec", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" ], "return_type": { - "value": "int8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -26314,27 +31744,33 @@ ], "instructions": [ [ - "DUP" + "CMPLO" + ], + [ + "CMPLO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupb_lane_u8", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_n_u32]", "arguments": [ - "uint8x8_t vec", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -26342,27 +31778,33 @@ ], "instructions": [ [ - "DUP" + "CMPLO" + ], + [ + "CMPLO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupb_laneq_p8", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_n_u8]", "arguments": [ - "poly8x16_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" ], "return_type": { - "value": 
"poly8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -26370,27 +31812,33 @@ ], "instructions": [ [ - "DUP" + "CMPLO" + ], + [ + "CMPLO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupb_laneq_s8", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_s16]", "arguments": [ - "int8x16_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" ], "return_type": { - "value": "int8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -26398,27 +31846,30 @@ ], "instructions": [ [ - "DUP" + "CMPLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupb_laneq_u8", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_s32]", "arguments": [ - "uint8x16_t vec", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -26426,27 +31877,30 @@ ], "instructions": [ [ - "DUP" + "CMPLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupd_lane_f64", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_s8]", "arguments": [ - "float64x1_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" ], "return_type": { - "value": "float64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.D" + 
}, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -26454,27 +31908,30 @@ ], "instructions": [ [ - "DUP" + "CMPLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupd_lane_s64", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_u16]", "arguments": [ - "int64x1_t vec", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -26482,27 +31939,30 @@ ], "instructions": [ [ - "DUP" + "CMPLO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupd_lane_u64", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_u32]", "arguments": [ - "uint64x1_t vec", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -26510,27 +31970,30 @@ ], "instructions": [ [ - "DUP" + "CMPLO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupd_laneq_f64", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_u8]", "arguments": [ - "float64x2_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -26538,27 +32001,30 @@ ], "instructions": [ [ - "DUP" + "CMPLO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupd_laneq_s64", + "SIMD_ISA": "SVE", + "name": 
"svcmpne[_f16]", "arguments": [ - "int64x2_t vec", - "const int lane" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -26566,27 +32032,30 @@ ], "instructions": [ [ - "DUP" + "FCMNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupd_laneq_u64", + "SIMD_ISA": "SVE", + "name": "svcmpne[_f32]", "arguments": [ - "uint64x2_t vec", - "const int lane" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -26594,27 +32063,30 @@ ], "instructions": [ [ - "DUP" + "FCMNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vduph_lane_f16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_f64]", "arguments": [ - "float16x4_t vec", - "const int lane" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "float16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -26622,27 +32094,30 @@ ], "instructions": [ [ - "DUP" + "FCMNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vduph_lane_p16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_f16]", "arguments": [ - "poly16x4_t vec", - "const int lane" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "poly16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - 
"minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -26650,27 +32125,33 @@ ], "instructions": [ [ - "DUP" + "FCMNE" + ], + [ + "FCMNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vduph_lane_s16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_f32]", "arguments": [ - "int16x4_t vec", - "const int lane" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "int16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -26678,27 +32159,33 @@ ], "instructions": [ [ - "DUP" + "FCMNE" + ], + [ + "FCMNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vduph_lane_u16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_f64]", "arguments": [ - "uint16x4_t vec", - "const int lane" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -26706,27 +32193,33 @@ ], "instructions": [ [ - "DUP" + "FCMNE" + ], + [ + "FCMNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vduph_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_s16]", "arguments": [ - "float16x8_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "float16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } 
}, "Architectures": [ @@ -26734,27 +32227,33 @@ ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vduph_laneq_p16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_s32]", "arguments": [ - "poly16x8_t vec", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "poly16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -26762,27 +32261,33 @@ ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vduph_laneq_s16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_s64]", "arguments": [ - "int16x8_t vec", - "const int lane" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -26790,27 +32295,33 @@ ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vduph_laneq_u16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_s8]", "arguments": [ - "uint16x8_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -26818,87 +32329,101 @@ ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_f16", + "SIMD_ISA": 
"SVE", + "name": "svcmpne[_n_u16]", "arguments": [ - "float16x4_t vec", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_f32", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_u32]", "arguments": [ - "float32x2_t vec", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_f64", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_u64]", "arguments": [ - "float64x1_t vec", - "const int lane" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -26906,356 +32431,383 @@ ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_p16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_u8]", "arguments": [ - "poly16x4_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - 
"value": "poly16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_p64", + "SIMD_ISA": "SVE", + "name": "svcmpne[_s16]", "arguments": [ - "poly64x1_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "poly64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_p8", + "SIMD_ISA": "SVE", + "name": "svcmpne[_s32]", "arguments": [ - "poly8x8_t vec", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "poly8x16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_s16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_s64]", "arguments": [ - "int16x4_t vec", - "const int lane" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + 
"register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_s32", + "SIMD_ISA": "SVE", + "name": "svcmpne[_s8]", "arguments": [ - "int32x2_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_s64", + "SIMD_ISA": "SVE", + "name": "svcmpne[_u16]", "arguments": [ - "int64x1_t vec", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_s8", + "SIMD_ISA": "SVE", + "name": "svcmpne[_u32]", "arguments": [ - "int8x8_t vec", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_u16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_u64]", "arguments": [ - "uint16x4_t vec", - "const 
int lane" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_u32", + "SIMD_ISA": "SVE", + "name": "svcmpne[_u8]", "arguments": [ - "uint32x2_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_u64", + "SIMD_ISA": "SVE", + "name": "svcmpne_wide[_n_s16]", "arguments": [ - "uint64x1_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "int64_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_u8", + "SIMD_ISA": "SVE", + "name": "svcmpne_wide[_n_s32]", "arguments": [ - "uint8x8_t vec", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "int64_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + 
"register": "Zop1.S" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svcmpne_wide[_n_s8]", "arguments": [ - "float16x8_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "int64_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -27263,27 +32815,33 @@ ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svcmpne_wide[_s16]", "arguments": [ - "float32x4_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -27291,27 +32849,30 @@ ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_f64", + "SIMD_ISA": "SVE", + "name": "svcmpne_wide[_s32]", "arguments": [ - "float64x2_t vec", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -27319,27 +32880,30 @@ ], 
"instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_p16", + "SIMD_ISA": "SVE", + "name": "svcmpne_wide[_s8]", "arguments": [ - "poly16x8_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" ], "return_type": { - "value": "poly16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -27347,27 +32911,30 @@ ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_p64", + "SIMD_ISA": "SVE", + "name": "svcmpuo[_f16]", "arguments": [ - "poly64x2_t vec", - "const int lane" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "poly64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -27375,27 +32942,30 @@ ], "instructions": [ [ - "DUP" + "FCMUO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_p8", + "SIMD_ISA": "SVE", + "name": "svcmpuo[_f32]", "arguments": [ - "poly8x16_t vec", - "const int lane" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "poly8x16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -27403,27 +32973,30 @@ ], "instructions": [ [ - "DUP" + "FCMUO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_s16", + "SIMD_ISA": "SVE", + "name": "svcmpuo[_f64]", "arguments": [ - "int16x8_t vec", - "const int lane" + "svbool_t pg", + 
"svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -27431,27 +33004,30 @@ ], "instructions": [ [ - "DUP" + "FCMUO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_s32", + "SIMD_ISA": "SVE", + "name": "svcmpuo[_n_f16]", "arguments": [ - "int32x4_t vec", - "const int lane" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -27459,27 +33035,30 @@ ], "instructions": [ [ - "DUP" + "FCMUO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_s64", + "SIMD_ISA": "SVE", + "name": "svcmpuo[_n_f32]", "arguments": [ - "int64x2_t vec", - "const int lane" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -27487,27 +33066,30 @@ ], "instructions": [ [ - "DUP" + "FCMUO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_s8", + "SIMD_ISA": "SVE", + "name": "svcmpuo[_n_f64]", "arguments": [ - "int8x16_t vec", - "const int lane" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.D" }, - "vec": { - 
"register": "Vn.16B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -27515,27 +33097,30 @@ ], "instructions": [ [ - "DUP" + "FCMUO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_u16", + "SIMD_ISA": "SVE", + "name": "svcnot[_s16]_m", "arguments": [ - "uint16x8_t vec", - "const int lane" + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "inactive": { + "register": "Zinactive.H|Ztied.H" }, - "vec": { - "register": "Vn.8H" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -27543,27 +33128,30 @@ ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_u32", + "SIMD_ISA": "SVE", + "name": "svcnot[_s16]_x", "arguments": [ - "uint32x4_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "uint32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op": { + "register": "Zop.H|Ztied.H" }, - "vec": { - "register": "Vn.4S" + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -27571,27 +33159,30 @@ ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_u64", + "SIMD_ISA": "SVE", + "name": "svcnot[_s16]_z", "arguments": [ - "uint64x2_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "uint64x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op": { + "register": "Zop.H" }, - "vec": { - "register": "Vn.2D" + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -27599,27 +33190,31 @@ ], "instructions": [ [ - "DUP" + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_u8", + "SIMD_ISA": 
"SVE", + "name": "svcnot[_s32]_m", "arguments": [ - "uint8x16_t vec", - "const int lane" + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint8x16_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "inactive": { + "register": "Zinactive.S|Ztied.S" }, - "vec": { - "register": "Vn.16B" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -27627,72 +33222,93 @@ ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_f16", + "SIMD_ISA": "SVE", + "name": "svcnot[_s32]_x", "arguments": [ - "float16_t value" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "float16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_f32", + "SIMD_ISA": "SVE", + "name": "svcnot[_s32]_z", "arguments": [ - "float32_t value" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "float32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_f64", + "SIMD_ISA": "SVE", + "name": "svcnot[_s64]_m", "arguments": [ - "float64_t value" + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "float64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, 
"Architectures": [ @@ -27700,301 +33316,375 @@ ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_p16", + "SIMD_ISA": "SVE", + "name": "svcnot[_s64]_x", "arguments": [ - "poly16_t value" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "poly16x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_p64", + "SIMD_ISA": "SVE", + "name": "svcnot[_s64]_z", "arguments": [ - "poly64_t value" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "poly64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP" + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_p8", + "SIMD_ISA": "SVE", + "name": "svcnot[_s8]_m", "arguments": [ - "poly8_t value" + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "poly8x16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_s16", + "SIMD_ISA": "SVE", + "name": "svcnot[_s8]_x", "arguments": [ - "int16_t value" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "int16x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + 
"register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_s32", + "SIMD_ISA": "SVE", + "name": "svcnot[_s8]_z", "arguments": [ - "int32_t value" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "int32x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_s64", + "SIMD_ISA": "SVE", + "name": "svcnot[_u16]_m", "arguments": [ - "int64_t value" + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "int64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_s8", + "SIMD_ISA": "SVE", + "name": "svcnot[_u16]_x", "arguments": [ - "int8_t value" + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "int8x16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_u16", + "SIMD_ISA": "SVE", + "name": "svcnot[_u16]_z", "arguments": [ - "uint16_t value" + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": 
"Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_u32", + "SIMD_ISA": "SVE", + "name": "svcnot[_u32]_m", "arguments": [ - "uint32_t value" + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_u64", + "SIMD_ISA": "SVE", + "name": "svcnot[_u32]_x", "arguments": [ - "uint64_t value" + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_u8", + "SIMD_ISA": "SVE", + "name": "svcnot[_u32]_z", "arguments": [ - "uint8_t value" + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdups_lane_f32", + "SIMD_ISA": "SVE", + "name": "svcnot[_u64]_m", "arguments": [ - "float32x2_t vec", - "const int lane" + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "float32_t" + "value": "svuint64_t" }, 
"Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "inactive": { + "register": "Zinactive.D|Ztied.D" }, - "vec": { - "register": "Vn.2S" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -28002,27 +33692,30 @@ ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdups_lane_s32", + "SIMD_ISA": "SVE", + "name": "svcnot[_u64]_x", "arguments": [ - "int32x2_t vec", - "const int lane" + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "int32_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op": { + "register": "Zop.D|Ztied.D" }, - "vec": { - "register": "Vn.2S" + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -28030,27 +33723,30 @@ ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdups_lane_u32", + "SIMD_ISA": "SVE", + "name": "svcnot[_u64]_z", "arguments": [ - "uint32x2_t vec", - "const int lane" + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "uint32_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op": { + "register": "Zop.D" }, - "vec": { - "register": "Vn.2S" + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -28058,27 +33754,31 @@ ], "instructions": [ [ - "DUP" + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdups_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svcnot[_u8]_m", "arguments": [ - "float32x4_t vec", - "const int lane" + "svuint8_t inactive", + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "float32_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "inactive": { + "register": "Zinactive.B|Ztied.B" }, - "vec": { - "register": "Vn.4S" + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -28086,27 
+33786,30 @@ ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdups_laneq_s32", + "SIMD_ISA": "SVE", + "name": "svcnot[_u8]_x", "arguments": [ - "int32x4_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "int32_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op": { + "register": "Zop.B|Ztied.B" }, - "vec": { - "register": "Vn.4S" + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -28114,27 +33817,30 @@ ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdups_laneq_u32", + "SIMD_ISA": "SVE", + "name": "svcnot[_u8]_z", "arguments": [ - "uint32x4_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "uint32_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op": { + "register": "Zop.B" }, - "vec": { - "register": "Vn.4S" + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -28142,779 +33848,845 @@ ], "instructions": [ [ - "DUP" + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor3q_s16", + "SIMD_ISA": "SVE", + "name": "svcnt[_f16]_m", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x8_t c" + "svuint16_t inactive", + "svbool_t pg", + "svfloat16_t op" ], "return_type": { - "value": "int16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.H" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "EOR3" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor3q_s32", + "SIMD_ISA": "SVE", + "name": "svcnt[_f16]_x", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "int32x4_t c" + "svbool_t pg", + "svfloat16_t op" ], 
"return_type": { - "value": "int32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.H|Ztied.H" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.H" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "EOR3" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor3q_s64", + "SIMD_ISA": "SVE", + "name": "svcnt[_f16]_z", "arguments": [ - "int64x2_t a", - "int64x2_t b", - "int64x2_t c" + "svbool_t pg", + "svfloat16_t op" ], "return_type": { - "value": "int64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.H" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.H" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "EOR3" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor3q_s8", + "SIMD_ISA": "SVE", + "name": "svcnt[_f32]_m", "arguments": [ - "int8x16_t a", - "int8x16_t b", - "int8x16_t c" + "svuint32_t inactive", + "svbool_t pg", + "svfloat32_t op" ], "return_type": { - "value": "int8x16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.S" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "EOR3" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor3q_u16", + "SIMD_ISA": "SVE", + "name": "svcnt[_f32]_x", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16x8_t c" + "svbool_t pg", + "svfloat32_t op" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.S|Ztied.S" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.S" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "EOR3" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - 
"name": "veor3q_u32", + "SIMD_ISA": "SVE", + "name": "svcnt[_f32]_z", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" + "svbool_t pg", + "svfloat32_t op" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.S" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.S" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "EOR3" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor3q_u64", + "SIMD_ISA": "SVE", + "name": "svcnt[_f64]_m", "arguments": [ - "uint64x2_t a", - "uint64x2_t b", - "uint64x2_t c" + "svuint64_t inactive", + "svbool_t pg", + "svfloat64_t op" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.D" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "EOR3" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor3q_u8", + "SIMD_ISA": "SVE", + "name": "svcnt[_f64]_x", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", - "uint8x16_t c" + "svbool_t pg", + "svfloat64_t op" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.D|Ztied.D" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.D" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "EOR3" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor_s16", + "SIMD_ISA": "SVE", + "name": "svcnt[_f64]_z", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svbool_t pg", + "svfloat64_t op" ], "return_type": { - "value": "int16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.D" }, - "b": { - "register": "Vm.8B" + 
"pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor_s32", + "SIMD_ISA": "SVE", + "name": "svcnt[_s16]_m", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svuint16_t inactive", + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "int32x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "inactive": { + "register": "Zinactive.H|Ztied.H" }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor_s64", + "SIMD_ISA": "SVE", + "name": "svcnt[_s16]_x", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "int64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.H|Ztied.H" }, - "b": { - "register": "Vm.8B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor_s8", + "SIMD_ISA": "SVE", + "name": "svcnt[_s16]_z", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "int8x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.H" }, - "b": { - "register": "Vm.8B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor_u16", + "SIMD_ISA": "SVE", + "name": "svcnt[_s32]_m", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svuint32_t inactive", + "svbool_t pg", + "svint32_t op" ], "return_type": { 
- "value": "uint16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "inactive": { + "register": "Zinactive.S|Ztied.S" }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor_u32", + "SIMD_ISA": "SVE", + "name": "svcnt[_s32]_x", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.S|Ztied.S" }, - "b": { - "register": "Vm.8B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor_u64", + "SIMD_ISA": "SVE", + "name": "svcnt[_s32]_z", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.S" }, - "b": { - "register": "Vm.8B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor_u8", + "SIMD_ISA": "SVE", + "name": "svcnt[_s64]_m", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svuint64_t inactive", + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "inactive": { + "register": "Zinactive.D|Ztied.D" }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" 
+ ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veorq_s16", + "SIMD_ISA": "SVE", + "name": "svcnt[_s64]_x", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "int16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.D|Ztied.D" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veorq_s32", + "SIMD_ISA": "SVE", + "name": "svcnt[_s64]_z", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "int32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.D" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veorq_s64", + "SIMD_ISA": "SVE", + "name": "svcnt[_s8]_m", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svuint8_t inactive", + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "int64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "inactive": { + "register": "Zinactive.B|Ztied.B" }, - "b": { - "register": "Vm.16B" + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veorq_s8", + "SIMD_ISA": "SVE", + "name": "svcnt[_s8]_x", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "int8x16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { 
+ "register": "Zop.B|Ztied.B" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veorq_u16", + "SIMD_ISA": "SVE", + "name": "svcnt[_s8]_z", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.B" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veorq_u32", + "SIMD_ISA": "SVE", + "name": "svcnt[_u16]_m", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "inactive": { + "register": "Zinactive.H|Ztied.H" }, - "b": { - "register": "Vm.16B" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veorq_u64", + "SIMD_ISA": "SVE", + "name": "svcnt[_u16]_x", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.H|Ztied.H" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veorq_u8", + "SIMD_ISA": "SVE", + "name": "svcnt[_u16]_z", "arguments": [ - 
"uint8x16_t a", - "uint8x16_t b" + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.H" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_f16", + "SIMD_ISA": "SVE", + "name": "svcnt[_u32]_m", "arguments": [ - "float16x4_t a", - "float16x4_t b", - "const int n" + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "float16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "inactive": { + "register": "Zinactive.S|Ztied.S" }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.S" }, - "n": { - "minimum": 0, - "maximum": 3 + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EXT" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_f32", + "SIMD_ISA": "SVE", + "name": "svcnt[_u32]_x", "arguments": [ - "float32x2_t a", - "float32x2_t b", - "const int n" + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "float32x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.S|Ztied.S" }, - "b": { - "register": "Vm.8B" - }, - "n": { - "minimum": 0, - "maximum": 1 + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EXT" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_f64", + "SIMD_ISA": "SVE", + "name": "svcnt[_u32]_z", "arguments": [ - "float64x1_t a", - "float64x1_t b", - "const int n" + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "float64x1_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" 
+ "op": { + "register": "Zop.S" }, - "b": { - "register": "Vm.8B" - }, - "n": { - "minimum": 0, - "maximum": 0 + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -28922,266 +34694,197743 @@ ], "instructions": [ [ - "EXT" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_p16", + "SIMD_ISA": "SVE", + "name": "svcnt[_u64]_m", "arguments": [ - "poly16x4_t a", - "poly16x4_t b", - "const int n" + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "poly16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "inactive": { + "register": "Zinactive.D|Ztied.D" }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.D" }, - "n": { - "minimum": 0, - "maximum": 3 + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EXT" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_p64", + "SIMD_ISA": "SVE", + "name": "svcnt[_u64]_x", "arguments": [ - "poly64x1_t a", - "poly64x1_t b", - "const int n" + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "poly64x1_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.D|Ztied.D" }, - "n": { - "minimum": 0, - "maximum": 0 + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "EXT" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_p8", + "SIMD_ISA": "SVE", + "name": "svcnt[_u64]_z", "arguments": [ - "poly8x8_t a", - "poly8x8_t b", - "const int n" + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "poly8x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.D" }, - "n": { - "minimum": 0, - "maximum": 7 + "pg": { + "register": "Pg.D" } }, "Architectures": [ - 
"v7", - "A32", "A64" ], "instructions": [ [ - "EXT" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_s16", + "SIMD_ISA": "SVE", + "name": "svcnt[_u8]_m", "arguments": [ - "int16x4_t a", - "int16x4_t b", - "const int n" + "svuint8_t inactive", + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "int16x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "inactive": { + "register": "Zinactive.B|Ztied.B" }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.B" }, - "n": { - "minimum": 0, - "maximum": 3 + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EXT" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_s32", + "SIMD_ISA": "SVE", + "name": "svcnt[_u8]_x", "arguments": [ - "int32x2_t a", - "int32x2_t b", - "const int n" + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "int32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.B|Ztied.B" }, - "n": { - "minimum": 0, - "maximum": 1 + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EXT" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_s64", + "SIMD_ISA": "SVE", + "name": "svcnt[_u8]_z", "arguments": [ - "int64x1_t a", - "int64x1_t b", - "const int n" + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "int64x1_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.B" }, - "n": { - "minimum": 0, - "maximum": 0 + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EXT" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_s8", - "arguments": [ - "int8x8_t a", - "int8x8_t b", - "const 
int n" - ], + "SIMD_ISA": "SVE", + "name": "svcntb", + "arguments": [], "return_type": { - "value": "int8x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" - }, - "n": { - "minimum": 0, - "maximum": 7 - } + "value": "uint64_t" }, + "Arguments_Preparation": {}, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EXT" + "CNTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_u16", + "SIMD_ISA": "SVE", + "name": "svcntb_pat", "arguments": [ - "uint16x4_t a", - "uint16x4_t b", - "const int n" + "enum svpattern pattern" ], "return_type": { - "value": "uint16x4_t" + "value": "uint64_t" }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" - }, - "n": { + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntd", + "arguments": [], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntd_pat", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnth", + "arguments": [], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnth_pat", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntp_b16", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "uint64_t" + }, + 
"Arguments_Preparation": { + "op": { + "register": "Pop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntp_b32", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntp_b64", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntp_b8", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcntp_c16", + "arguments": [ + "svcount_t pnn", + "uint64_t vl" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "pnn": { + "register": "PNreg1.H" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcntp_c32", + "arguments": [ + "svcount_t pnn", + "uint64_t vl" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "pnn": { + "register": "PNreg1.S" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcntp_c64", + "arguments": [ + "svcount_t pnn", + "uint64_t vl" + ], + "return_type": { + 
"value": "uint64_t" + }, + "Arguments_Preparation": { + "pnn": { + "register": "PNreg1.D" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcntp_c8", + "arguments": [ + "svcount_t pnn", + "uint64_t vl" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "pnn": { + "register": "PNreg1.B" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntw", + "arguments": [], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntw_pat", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcreate2[_b]", + "arguments": [ + "svbool_t x", + "svbool_t y" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_f16]", + "arguments": [ + "svfloat16_t x0", + "svfloat16_t x1" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_f32]", + "arguments": [ + "svfloat32_t x0", + "svfloat32_t x1" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_f64]", + "arguments": [ + "svfloat64_t x0", + "svfloat64_t x1" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_s16]", + "arguments": [ + 
"svint16_t x0", + "svint16_t x1" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_s32]", + "arguments": [ + "svint32_t x0", + "svint32_t x1" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_s64]", + "arguments": [ + "svint64_t x0", + "svint64_t x1" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_s8]", + "arguments": [ + "svint8_t x0", + "svint8_t x1" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_u16]", + "arguments": [ + "svuint16_t x0", + "svuint16_t x1" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_u32]", + "arguments": [ + "svuint32_t x0", + "svuint32_t x1" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_u64]", + "arguments": [ + "svuint64_t x0", + "svuint64_t x1" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_u8]", + "arguments": [ + "svuint8_t x0", + "svuint8_t x1" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_f16]", + "arguments": [ + "svfloat16_t x0", + "svfloat16_t x1", + "svfloat16_t x2" + ], + "return_type": { + "value": "svfloat16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_f32]", + "arguments": [ + "svfloat32_t x0", + "svfloat32_t x1", + "svfloat32_t x2" + ], + "return_type": { + "value": "svfloat32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svcreate3[_f64]", + "arguments": [ + "svfloat64_t x0", + "svfloat64_t x1", + "svfloat64_t x2" + ], + "return_type": { + "value": "svfloat64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_s16]", + "arguments": [ + "svint16_t x0", + "svint16_t x1", + "svint16_t x2" + ], + "return_type": { + "value": "svint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_s32]", + "arguments": [ + "svint32_t x0", + "svint32_t x1", + "svint32_t x2" + ], + "return_type": { + "value": "svint32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_s64]", + "arguments": [ + "svint64_t x0", + "svint64_t x1", + "svint64_t x2" + ], + "return_type": { + "value": "svint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_s8]", + "arguments": [ + "svint8_t x0", + "svint8_t x1", + "svint8_t x2" + ], + "return_type": { + "value": "svint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_u16]", + "arguments": [ + "svuint16_t x0", + "svuint16_t x1", + "svuint16_t x2" + ], + "return_type": { + "value": "svuint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_u32]", + "arguments": [ + "svuint32_t x0", + "svuint32_t x1", + "svuint32_t x2" + ], + "return_type": { + "value": "svuint32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_u64]", + "arguments": [ + "svuint64_t x0", + "svuint64_t x1", + "svuint64_t x2" + ], + "return_type": { + "value": "svuint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_u8]", + "arguments": [ + "svuint8_t x0", + "svuint8_t x1", + "svuint8_t x2" + ], + "return_type": { + "value": "svuint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcreate4[_b]", + "arguments": [ + 
"svbool_t x", + "svbool_t y", + "svbool_t z", + "svbool_t w" + ], + "return_type": { + "value": "svboolx4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_f16]", + "arguments": [ + "svfloat16_t x0", + "svfloat16_t x1", + "svfloat16_t x2", + "svfloat16_t x3" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_f32]", + "arguments": [ + "svfloat32_t x0", + "svfloat32_t x1", + "svfloat32_t x2", + "svfloat32_t x3" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_f64]", + "arguments": [ + "svfloat64_t x0", + "svfloat64_t x1", + "svfloat64_t x2", + "svfloat64_t x3" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_s16]", + "arguments": [ + "svint16_t x0", + "svint16_t x1", + "svint16_t x2", + "svint16_t x3" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_s32]", + "arguments": [ + "svint32_t x0", + "svint32_t x1", + "svint32_t x2", + "svint32_t x3" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_s64]", + "arguments": [ + "svint64_t x0", + "svint64_t x1", + "svint64_t x2", + "svint64_t x3" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_s8]", + "arguments": [ + "svint8_t x0", + "svint8_t x1", + "svint8_t x2", + "svint8_t x3" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_u16]", + "arguments": [ + "svuint16_t x0", + "svuint16_t x1", + "svuint16_t x2", + "svuint16_t x3" + ], + "return_type": { + "value": 
"svuint16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_u32]", + "arguments": [ + "svuint32_t x0", + "svuint32_t x1", + "svuint32_t x2", + "svuint32_t x3" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_u64]", + "arguments": [ + "svuint64_t x0", + "svuint64_t x1", + "svuint64_t x2", + "svuint64_t x3" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_u8]", + "arguments": [ + "svuint8_t x0", + "svuint8_t x1", + "svuint8_t x2", + "svuint8_t x3" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_f32]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svcvt_f16[_f64]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s32]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s64]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u32]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u64]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": 
"Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f16]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f64]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + 
"register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" 
+ }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s64]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + 
"Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u64]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f16]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + 
"Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f32]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + 
"return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_s32]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_s64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_s64]_x", + "arguments": [ + 
"svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u32]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t 
pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s16[_f16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s16[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svcvt_s16[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f16]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] 
+ }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f64]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svcvt_s64[_f16]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f32]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], 
+ [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u16[_f16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + 
], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u16[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u16[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f16]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f64]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + 
"register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f16]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f32]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + 
"op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": 
{ + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtlt_f32[_f16]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtlt_f32[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtlt_f64[_f32]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.D" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtlt_f64[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtnt_f16[_f32]_m", + "arguments": [ + "svfloat16_t even", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op": { + 
"register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtnt_f16[_f32]_x", + "arguments": [ + "svfloat16_t even", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtnt_f32[_f64]_m", + "arguments": [ + "svfloat32_t even", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtnt_f32[_f64]_x", + "arguments": [ + "svfloat32_t even", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtx_f32[_f64]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTX" + ], + [ + "MOVPRFX", + "FCVTX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtx_f32[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { 
+ "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTX" + ], + [ + "MOVPRFX", + "FCVTX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtx_f32[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtxnt_f32[_f64]_m", + "arguments": [ + "svfloat32_t even", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTXNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtxnt_f32[_f64]_x", + "arguments": [ + "svfloat32_t even", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTXNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f16]_x", + "arguments": [ + "svbool_t pg", + 
"svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f32]_z", + "arguments": [ + 
"svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ 
+ [ + "SDIV" + ], + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u64]_m", + "arguments": [ + "svbool_t pg", + 
"svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + 
"register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + 
] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + 
}, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + 
} + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svdivr[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + 
"value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, 
+ "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "SDIV" + ], + [ + "MOVPRFX", + 
"SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { 
+ "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + 
"pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svdivr[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svdot[_f32_f16]", + "arguments": [ + "svfloat32_t zda", + "svfloat16_t zn", + "svfloat16_t zm" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { + "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint16_t op2", + 
"uint16_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svdot[_s32_s16]", + "arguments": [ + "svint32_t zda", + "svint16_t zn", + "svint16_t zm" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { + "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_s64]", + "arguments": [ + "svint64_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svdot[_u32_u16]", + "arguments": [ + "svuint32_t zda", + "svuint16_t zn", + "svuint16_t zm" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { + "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svdot_lane[_f32_f16]", + "arguments": [ + "svfloat32_t zda", + "svfloat16_t zn", + "svfloat16_t zm", + "uint64_t imm_idx" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_idx": { + "immediate": "imm1" + }, + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { + "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "svint8_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svdot_lane[_s32_s16]", + "arguments": [ + "svint32_t zda", + "svint16_t zn", + "svint16_t zm", + "uint64_t imm_idx" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_idx": { + "immediate": "imm1" + }, + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { + "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint8_t op2", + "svuint8_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svdot_lane[_u32_u16]", + "arguments": [ + "svuint32_t zda", + "svuint16_t zn", + "svuint16_t zm", + "uint64_t imm_idx" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_idx": { + "immediate": "imm1" + }, + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { + "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svdot_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_b16", + "arguments": [ + "bool op" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_b32", + "arguments": [ + "bool op" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_b64", + "arguments": [ + "bool op" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_b8", + "arguments": [ + "bool op" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f16", + "arguments": [ + "float16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f16_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "float16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.H" + }, + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] 
+ }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f16_x", + "arguments": [ + "svbool_t pg", + "float16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f16_z", + "arguments": [ + "svbool_t pg", + "float16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f32", + "arguments": [ + "float32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f32_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "float32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.S" + }, + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f32_x", + "arguments": [ + "svbool_t pg", + "float32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ 
+ [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f32_z", + "arguments": [ + "svbool_t pg", + "float32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f64", + "arguments": [ + "float64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f64_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "float64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.D" + }, + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f64_x", + "arguments": [ + "svbool_t pg", + "float64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f64_z", + "arguments": [ + "svbool_t pg", + "float64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": 
"Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s16", + "arguments": [ + "int16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s16_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "int16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.H" + }, + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s16_x", + "arguments": [ + "svbool_t pg", + "int16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s16_z", + "arguments": [ + "svbool_t pg", + "int16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s32", + "arguments": [ + "int32_t op" + ], + 
"return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s32_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "int32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.S" + }, + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s32_x", + "arguments": [ + "svbool_t pg", + "int32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s32_z", + "arguments": [ + "svbool_t pg", + "int32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s64", + "arguments": [ + "int64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svdup[_n]_s64_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "int64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.D" + }, + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s64_x", + "arguments": [ + "svbool_t pg", + "int64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s64_z", + "arguments": [ + "svbool_t pg", + "int64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s8", + "arguments": [ + "int8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s8_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "int8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.B" + }, + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s8_x", + "arguments": [ + "svbool_t pg", + "int8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s8_z", + "arguments": [ + "svbool_t pg", + "int8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u16", + "arguments": [ + "uint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u16_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "uint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.H" + }, + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u16_x", + "arguments": [ + "svbool_t pg", + "uint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + 
"Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u16_z", + "arguments": [ + "svbool_t pg", + "uint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u32", + "arguments": [ + "uint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u32_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "uint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.S" + }, + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u32_x", + "arguments": [ + "svbool_t pg", + "uint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svdup[_n]_u32_z", + "arguments": [ + "svbool_t pg", + "uint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u64", + "arguments": [ + "uint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u64_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "uint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.D" + }, + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u64_x", + "arguments": [ + "svbool_t pg", + "uint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u64_z", + "arguments": [ + "svbool_t pg", + "uint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u8", + "arguments": [ + "uint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u8_m", + "arguments": [ + "svuint8_t inactive", + "svbool_t pg", + "uint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.B" + }, + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u8_x", + "arguments": [ + "svbool_t pg", + "uint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u8_z", + "arguments": [ + "svbool_t pg", + "uint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_f16]", + "arguments": [ + "svfloat16_t data", + "uint16_t index" + ], + "return_type": { + "value": "svfloat16_t" + }, + 
"Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "index": { + "register": "Zindex.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_f32]", + "arguments": [ + "svfloat32_t data", + "uint32_t index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "index": { + "register": "Zindex.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_f64]", + "arguments": [ + "svfloat64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "index": { + "register": "Zindex.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_s16]", + "arguments": [ + "svint16_t data", + "uint16_t index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "index": { + "register": "Zindex.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_s32]", + "arguments": [ + "svint32_t data", + "uint32_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "index": { + "register": "Zindex.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_s64]", + "arguments": [ + "svint64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "index": { + "register": 
"Zindex.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_s8]", + "arguments": [ + "svint8_t data", + "uint8_t index" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "index": { + "register": "Zindex.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_u16]", + "arguments": [ + "svuint16_t data", + "uint16_t index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "index": { + "register": "Zindex.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_u32]", + "arguments": [ + "svuint32_t data", + "uint32_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "index": { + "register": "Zindex.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_u64]", + "arguments": [ + "svuint64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "index": { + "register": "Zindex.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_u8]", + "arguments": [ + "svuint8_t data", + "uint8_t index" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "index": { + "register": "Zindex.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + 
"TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_b16", + "arguments": [ + "bool x0", + "bool x1", + "bool x2", + "bool x3", + "bool x4", + "bool x5", + "bool x6", + "bool x7" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_b32", + "arguments": [ + "bool x0", + "bool x1", + "bool x2", + "bool x3" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_b64", + "arguments": [ + "bool x0", + "bool x1" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_b8", + "arguments": [ + "bool x0", + "bool x1", + "bool x2", + "bool x3", + "bool x4", + "bool x5", + "bool x6", + "bool x7", + "bool x8", + "bool x9", + "bool x10", + "bool x11", + "bool x12", + "bool x13", + "bool x14", + "bool x15" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_f16", + "arguments": [ + "float16_t x0", + "float16_t x1", + "float16_t x2", + "float16_t x3", + "float16_t x4", + "float16_t x5", + "float16_t x6", + "float16_t x7" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_f32", + "arguments": [ + "float32_t x0", + "float32_t x1", + "float32_t x2", + "float32_t x3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_f64", + "arguments": [ + "float64_t x0", + "float64_t x1" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_s16", + "arguments": [ + "int16_t x0", + "int16_t x1", + "int16_t x2", + "int16_t x3", + "int16_t x4", + "int16_t x5", + "int16_t x6", + "int16_t x7" + ], + "return_type": 
{ + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_s32", + "arguments": [ + "int32_t x0", + "int32_t x1", + "int32_t x2", + "int32_t x3" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_s64", + "arguments": [ + "int64_t x0", + "int64_t x1" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_s8", + "arguments": [ + "int8_t x0", + "int8_t x1", + "int8_t x2", + "int8_t x3", + "int8_t x4", + "int8_t x5", + "int8_t x6", + "int8_t x7", + "int8_t x8", + "int8_t x9", + "int8_t x10", + "int8_t x11", + "int8_t x12", + "int8_t x13", + "int8_t x14", + "int8_t x15" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_u16", + "arguments": [ + "uint16_t x0", + "uint16_t x1", + "uint16_t x2", + "uint16_t x3", + "uint16_t x4", + "uint16_t x5", + "uint16_t x6", + "uint16_t x7" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_u32", + "arguments": [ + "uint32_t x0", + "uint32_t x1", + "uint32_t x2", + "uint32_t x3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_u64", + "arguments": [ + "uint64_t x0", + "uint64_t x1" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_u8", + "arguments": [ + "uint8_t x0", + "uint8_t x1", + "uint8_t x2", + "uint8_t x3", + "uint8_t x4", + "uint8_t x5", + "uint8_t x6", + "uint8_t x7", + "uint8_t x8", + "uint8_t x9", + "uint8_t x10", + "uint8_t x11", + "uint8_t x12", + "uint8_t x13", + "uint8_t x14", + "uint8_t x15" + ], + "return_type": { + "value": "svuint8_t" + }, + 
"Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_f16]", + "arguments": [ + "svfloat16_t data", + "uint64_t index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_f32]", + "arguments": [ + "svfloat32_t data", + "uint64_t index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_f64]", + "arguments": [ + "svfloat64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_s16]", + "arguments": [ + "svint16_t data", + "uint64_t index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_s32]", + "arguments": [ + "svint32_t data", + "uint64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + 
"data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_s64]", + "arguments": [ + "svint64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_s8]", + "arguments": [ + "svint8_t data", + "uint64_t index" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_u16]", + "arguments": [ + "svuint16_t data", + "uint64_t index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_u32]", + "arguments": [ + "svuint32_t data", + "uint64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + 
}, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_u64]", + "arguments": [ + "svuint64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_u8]", + "arguments": [ + "svuint8_t data", + "uint64_t index" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t 
op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_u64]", + "arguments": [ 
+ "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_s64]", 
+ "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"sveor3[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s8]_x", + "arguments": [ + 
"svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + 
], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + 
"uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + 
} + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s64]_z", + "arguments": [ + "svbool_t pg", + 
"svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": 
"Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u32]_z", + "arguments": [ + 
"svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": 
"Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_s16]", + "arguments": [ + "svint16_t odd", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_s32]", + "arguments": [ + "svint32_t odd", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"sveorbt[_n_s64]", + "arguments": [ + "svint64_t odd", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_s8]", + "arguments": [ + "svint8_t odd", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_u16]", + "arguments": [ + "svuint16_t odd", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_u32]", + "arguments": [ + "svuint32_t odd", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_u64]", + "arguments": [ + "svuint64_t odd", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "odd": { + 
"register": "Zodd.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_u8]", + "arguments": [ + "svuint8_t odd", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_s16]", + "arguments": [ + "svint16_t odd", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_s32]", + "arguments": [ + "svint32_t odd", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_s64]", + "arguments": [ + "svint64_t odd", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + 
"MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_s8]", + "arguments": [ + "svint8_t odd", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_u16]", + "arguments": [ + "svuint16_t odd", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_u32]", + "arguments": [ + "svuint32_t odd", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_u64]", + "arguments": [ + "svuint64_t odd", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_u8]", + "arguments": [ + "svuint8_t odd", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": 
"svuint8_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_s16]", + "arguments": [ + "svint16_t even", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_s32]", + "arguments": [ + "svint32_t even", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_s64]", + "arguments": [ + "svint64_t even", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_s8]", + "arguments": [ + "svint8_t even", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_u16]", + "arguments": [ + "svuint16_t even", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_u32]", + "arguments": [ + "svuint32_t even", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_u64]", + "arguments": [ + "svuint64_t even", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_u8]", + "arguments": [ + "svuint8_t even", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_s16]", + 
"arguments": [ + "svint16_t even", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_s32]", + "arguments": [ + "svint32_t even", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_s64]", + "arguments": [ + "svint64_t even", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_s8]", + "arguments": [ + "svint8_t even", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_u16]", + "arguments": [ + "svuint16_t even", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": 
"Zeven.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_u32]", + "arguments": [ + "svuint32_t even", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_u64]", + "arguments": [ + "svuint64_t even", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_u8]", + "arguments": [ + "svuint8_t even", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_s32]", + "arguments": [ + "svbool_t 
pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"sveorv[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexpa[_f16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FEXPA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexpa[_f32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FEXPA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexpa[_f64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FEXPA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 127 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 127 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_s8]", + "arguments": [ + 
"svint8_t op1", + "svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 255 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 127 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 255 
+ }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" 
+ }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + 
"inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": 
"Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTH" + ], + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": 
"Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTH" + ], + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTH" + ], + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTH" + ], + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + 
"register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTH" + ], + [ + "MOVPRFX", + "UXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTH" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTH" + ], + [ + "MOVPRFX", + "UXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTH" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": 
[ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTW" + ], + [ + "MOVPRFX", + "SXTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTW" + ], + [ + "MOVPRFX", + "SXTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTW" + ], + [ + "MOVPRFX", + "UXTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTW" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svget2[_b]", + "arguments": [ + "svboolx2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_f16]", + "arguments": [ + "svfloat16x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_f32]", + "arguments": [ + "svfloat32x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_f64]", + "arguments": [ + "svfloat64x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_s16]", + "arguments": [ + "svint16x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_s32]", + "arguments": [ + "svint32x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_s64]", + "arguments": [ + "svint64x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_s8]", + "arguments": [ + "svint8x2_t tuple", + "uint64_t 
imm_index" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_u16]", + "arguments": [ + "svuint16x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_u32]", + "arguments": [ + "svuint32x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_u64]", + "arguments": [ + "svuint64x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_u8]", + "arguments": [ + "svuint8x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_f16]", + "arguments": [ + "svfloat16x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_f32]", + "arguments": [ + "svfloat32x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_f64]", + "arguments": [ + "svfloat64x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_s16]", + "arguments": [ + "svint16x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_s32]", + "arguments": [ + "svint32x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svget3[_s64]", + "arguments": [ + "svint64x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_s8]", + "arguments": [ + "svint8x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_u16]", + "arguments": [ + "svuint16x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_u32]", + "arguments": [ + "svuint32x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_u64]", + "arguments": [ + "svuint64x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_u8]", + "arguments": [ + "svuint8x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svget4[_b]", + "arguments": [ + "svboolx4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_f16]", + "arguments": [ + "svfloat16x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_f32]", + "arguments": [ + "svfloat32x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_f64]", + "arguments": [ + "svfloat64x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ 
+ "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_s16]", + "arguments": [ + "svint16x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_s32]", + "arguments": [ + "svint32x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_s64]", + "arguments": [ + "svint64x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_s8]", + "arguments": [ + "svint8x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_u16]", + "arguments": [ + "svuint16x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_u32]", + "arguments": [ + "svuint32x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_u64]", + "arguments": [ + "svuint64x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_u8]", + "arguments": [ + "svuint8x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", 
+ "name": "svhadd[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": 
"SVE2", + "name": "svhadd[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s64]_x", + 
"arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ 
+ [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u64]_z", + "arguments": [ + "svbool_t 
pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistcnt[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + 
"register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTCNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistcnt[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTCNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistcnt[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTCNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistcnt[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTCNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistseg[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTSEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistseg[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" 
+ } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTSEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s32]_x", + "arguments": [ + 
"svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + 
"op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svhsub[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svhsub[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u64]_x", + "arguments": [ + 
"svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svhsubr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + 
"register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + 
"UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + 
"return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": 
"Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] 
+ }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + 
"return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" 
+ }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + 
"UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_s16", + "arguments": [ + "int16_t base", + "int16_t step" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_s32", + "arguments": [ + "int32_t base", + "int32_t step" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_s64", + "arguments": [ + "int64_t base", + "int64_t step" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "step": { + "register": "Xstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_s8", + "arguments": [ + "int8_t base", + "int8_t step" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + 
[ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_u16", + "arguments": [ + "uint16_t base", + "uint16_t step" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_u32", + "arguments": [ + "uint32_t base", + "uint32_t step" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_u64", + "arguments": [ + "uint64_t base", + "uint64_t step" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "step": { + "register": "Xstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_u8", + "arguments": [ + "uint8_t base", + "uint8_t step" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_f16]", + "arguments": [ + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Hop2|Wop2" + } + }, + "Architectures": 
[ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_f32]", + "arguments": [ + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Sop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_f64]", + "arguments": [ + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Dop2|Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Hop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Sop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Dop2|Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_s8]", + 
"arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Bop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Hop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Sop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Dop2|Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Bop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": 
{ + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svlasta[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + 
"pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_s8]", + 
"arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": 
"Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f16]_x2", + "arguments": [ + "svcount_t png", + "float16_t const * rn" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f16]_x4", + "arguments": [ + "svcount_t png", + "float16_t const * rn" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f32]_x2", + "arguments": [ + "svcount_t png", + "float32_t const * rn" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f32]_x4", + "arguments": [ + "svcount_t png", + "float32_t const * rn" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_f64]", + "arguments": [ + 
"svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f64]_x2", + "arguments": [ + "svcount_t png", + "float64_t const * rn" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f64]_x4", + "arguments": [ + "svcount_t png", + "float64_t const * rn" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s16]_x2", + "arguments": [ + "svcount_t png", + "int16_t const * rn" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s16]_x4", + "arguments": [ + "svcount_t png", + "int16_t const * rn" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + 
"register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s32]_x2", + "arguments": [ + "svcount_t png", + "int32_t const * rn" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s32]_x4", + "arguments": [ + "svcount_t png", + "int32_t const * rn" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s64]_x2", + "arguments": [ + "svcount_t png", + "int64_t const * rn" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s64]_x4", + "arguments": [ + "svcount_t png", 
+ "int64_t const * rn" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s8]_x2", + "arguments": [ + "svcount_t png", + "int8_t const * rn" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s8]_x4", + "arguments": [ + "svcount_t png", + "int8_t const * rn" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u16]_x2", + "arguments": [ + "svcount_t png", + "uint16_t const * rn" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": 
[ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u16]_x4", + "arguments": [ + "svcount_t png", + "uint16_t const * rn" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u32]_x2", + "arguments": [ + "svcount_t png", + "uint32_t const * rn" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u32]_x4", + "arguments": [ + "svcount_t png", + "uint32_t const * rn" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u64]_x2", + "arguments": [ + "svcount_t png", + "uint64_t const * rn" + ], + 
"return_type": { + "value": "svuint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u64]_x4", + "arguments": [ + "svcount_t png", + "uint64_t const * rn" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u8]_x2", + "arguments": [ + "svcount_t png", + "uint8_t const * rn" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u8]_x4", + "arguments": [ + "svcount_t png", + "uint8_t const * rn" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_index_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_offset_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t 
offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_index_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t 
index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_offset_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": 
{ + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]index[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]index[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + 
"register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]index[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svld1_gather_[s64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + 
"offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]index[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]index[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]index[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]offset[_f32]", + 
"arguments": [ + "svbool_t pg", + "const float32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": 
"Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const 
float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f16]_x2", + "arguments": [ + "svcount_t png", + "float16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f16]_x4", + "arguments": [ + "svcount_t png", + "float16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f32]_x2", + "arguments": [ + "svcount_t png", + "float32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": 
"Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f32]_x4", + "arguments": [ + "svcount_t png", + "float32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f64]_x2", + "arguments": [ + "svcount_t png", + "float64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f64]_x4", + "arguments": [ + "svcount_t png", + "float64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t 
*base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s16]_x2", + "arguments": [ + "svcount_t png", + "int16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s16]_x4", + "arguments": [ + "svcount_t png", + "int16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s32]_x2", + "arguments": [ + "svcount_t png", + "int32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s32]_x4", + "arguments": [ + "svcount_t png", + "int32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s64]_x2", + "arguments": [ + "svcount_t png", + "int64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s64]_x4", + "arguments": [ + "svcount_t png", + "int64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + 
"return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s8]_x2", + "arguments": [ + "svcount_t png", + "int8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s8]_x4", + "arguments": [ + "svcount_t png", + "int8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u16]_x2", + "arguments": [ + "svcount_t png", + "uint16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "RDVL", + "MADD", + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u16]_x4", + "arguments": [ + "svcount_t png", + "uint16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u32]_x2", + "arguments": [ + "svcount_t png", + "uint32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u32]_x4", + "arguments": [ + "svcount_t png", + "uint32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": 
"svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u64]_x2", + "arguments": [ + "svcount_t png", + "uint64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u64]_x4", + "arguments": [ + "svcount_t png", + "uint64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u8]_x2", + "arguments": [ + "svcount_t png", + "uint8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", 
+ "MUL", + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u8]_x4", + "arguments": [ + "svcount_t png", + "uint8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROH" + ], + [ + "LD1ROH" + ], + [ + "LD1ROH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROW" + ], + [ + "LD1ROW" + ], + [ + "LD1ROW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROD" + ], + [ + "LD1ROD" + ], + [ + "LD1ROD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROH" + ], 
+ [ + "LD1ROH" + ], + [ + "LD1ROH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROW" + ], + [ + "LD1ROW" + ], + [ + "LD1ROW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROD" + ], + [ + "LD1ROD" + ], + [ + "LD1ROD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROB" + ], + [ + "LD1ROB" + ], + [ + "LD1ROB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROH" + ], + [ + "LD1ROH" + ], + [ + "LD1ROH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROW" + ], + [ + "LD1ROW" + ], + [ + "LD1ROW" + ] 
+ ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROD" + ], + [ + "LD1ROD" + ], + [ + "LD1ROD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROB" + ], + [ + "LD1ROB" + ], + [ + "LD1ROB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQH" + ], + [ + "LD1RQH" + ], + [ + "LD1RQH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQW" + ], + [ + "LD1RQW" + ], + [ + "LD1RQW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQD" + ], + [ + "LD1RQD" + ], + [ + "LD1RQD" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svld1rq[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQH" + ], + [ + "LD1RQH" + ], + [ + "LD1RQH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQW" + ], + [ + "LD1RQW" + ], + [ + "LD1RQW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQD" + ], + [ + "LD1RQD" + ], + [ + "LD1RQD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQB" + ], + [ + "LD1RQB" + ], + [ + "LD1RQB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQH" + ], + [ + "LD1RQH" + ], + [ + "LD1RQH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_u32]", + 
"arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQW" + ], + [ + "LD1RQW" + ], + [ + "LD1RQW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQD" + ], + [ + "LD1RQD" + ], + [ + "LD1RQD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQB" + ], + [ + "LD1RQB" + ], + [ + "LD1RQB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": 
"Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_s16", + "arguments": 
[ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_u16", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, 
+ "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_s16", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_u16", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_u32", + "arguments": [ + "svbool_t pg", + 
"const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + 
"register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": 
"svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s32]index_s32", + 
"arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s32]index_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + 
"pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u32]index_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u32]index_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", 
+ "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": 
"Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" 
+ ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, 
+ "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + 
"return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u32base]_offset_s32", + "arguments": [ + 
"svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + 
}, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint64_t offsets" + ], + 
"return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + 
"Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_vnum_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + 
}, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_vnum_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": 
"Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svld1uh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s32]index_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s32]index_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + 
] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + 
"base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u32]index_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u32]index_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svld1uh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", 
+ "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svld1uw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + 
"indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + 
"base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + "LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], + [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Arguments_Preparation": { + "base": { + 
"register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + "LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], + [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2B" + ], + [ + "LD2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + "LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] 
+ }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], + [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2B" + ], + [ + "LD2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + "LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], + [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + "LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], + [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2B" + ], + [ + "LD2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + 
"return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + "LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], + [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2B" + ], + [ + "LD2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + 
] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": 
"svint8x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3B" + ], + [ + "LD3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3B" + ], + [ + "LD3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + 
"vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_s64]", + "arguments": [ 
+ "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3B" + ], + [ + "LD3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * 
svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3B" + ], + [ + "LD3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" 
+ ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4B" + ], + [ + "LD4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_u64]", + "arguments": [ + "svbool_t pg", + 
"const uint64_t *base" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4B" + ], + [ + "LD4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svld4_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4B" + ], + [ + "LD4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "base": { + 
"register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4B" + ], + [ + "LD4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_f32]", + "arguments": [ + "svbool_t 
pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + 
"register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": 
[ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_index_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_offset_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", 
+ "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_index_f64", + "arguments": [ + "svbool_t 
pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_offset_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": 
"Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]index[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]index[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint32_t indices" + ], + 
"return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]index[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" 
+ ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svint64_t offsets" + ], + "return_type": { 
+ "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]index[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]index[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]index[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svuint64_t indices" + ], + "return_type": { 
+ "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + 
"register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + 
"const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + 
"register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int8_t 
*base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_s16", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + 
"LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_u16", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_s16", + "arguments": [ + "svbool_t pg", + "const int8_t 
*base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_u16", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ 
+ "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + 
"return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", 
+ "name": "svldff1sh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s32]index_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + 
}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s32]index_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t 
indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u32]index_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u32]index_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t 
offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { 
+ "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svldff1sw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + 
"Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + 
"const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + 
"register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + 
"offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + 
], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svldff1ub_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": 
"Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": 
"Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" 
+ }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + 
"svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" 
+ ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + 
"return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s32]index_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s32]index_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svldff1uh_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": 
"Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u32]index_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u32]index_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svldff1uh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" 
+ } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } 
+ }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + 
"int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svldff1uw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + 
"register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": 
"svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + 
"register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + 
"return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ], + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 8": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ], + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svldnf1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ], + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 8": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ], + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntb()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + 
"base": { + "register": "Xbase" + }, + "base + vnum * svcnth() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ], + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 8": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ], + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntb()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_s16", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svldnf1sb_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_u16", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_s16", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * 
svcnth()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_u16", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], 
+ "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ], + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ], + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ], + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ], + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sw_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "LDNF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sw_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SW" + ], + [ + "LDNF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SW" + ], + [ + "LDNF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", 
+ "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd()": { + "register": "Xptr" + }, 
+ "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" 
+ ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uw_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uw_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" 
+ } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ], + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ], + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_f16]_x2", + "arguments": [ + "svcount_t png", + "float16_t const * rn" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_f16]_x4", + "arguments": [ + "svcount_t png", + "float16_t const * rn" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + 
"register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_f32]_x2", + "arguments": [ + "svcount_t png", + "float32_t const * rn" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_f32]_x4", + "arguments": [ + "svcount_t png", + "float32_t const * rn" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_f64]_x2", + "arguments": [ + "svcount_t png", + "float64_t const * rn" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svldnt1[_f64]_x4", + "arguments": [ + "svcount_t png", + "float64_t const * rn" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s16]_x2", + "arguments": [ + "svcount_t png", + "int16_t const * rn" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s16]_x4", + "arguments": [ + "svcount_t png", + "int16_t const * rn" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s32]_x2", + "arguments": [ + "svcount_t png", + "int32_t const * rn" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "png": 
{ + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s32]_x4", + "arguments": [ + "svcount_t png", + "int32_t const * rn" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s64]_x2", + "arguments": [ + "svcount_t png", + "int64_t const * rn" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s64]_x4", + "arguments": [ + "svcount_t png", + "int64_t const * rn" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ], + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", 
+ "name": "svldnt1[_s8]_x2", + "arguments": [ + "svcount_t png", + "int8_t const * rn" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s8]_x4", + "arguments": [ + "svcount_t png", + "int8_t const * rn" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u16]_x2", + "arguments": [ + "svcount_t png", + "uint16_t const * rn" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u16]_x4", + "arguments": [ + "svcount_t png", + "uint16_t const * rn" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + 
"register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u32]_x2", + "arguments": [ + "svcount_t png", + "uint32_t const * rn" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u32]_x4", + "arguments": [ + "svcount_t png", + "uint32_t const * rn" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u64]_x2", + "arguments": [ + "svcount_t png", + "uint64_t const * rn" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u64]_x4", + "arguments": [ + "svcount_t png", + "uint64_t const * rn" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svldnt1[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ], + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u8]_x2", + "arguments": [ + "svcount_t png", + "uint8_t const * rn" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u8]_x4", + "arguments": [ + "svcount_t png", + "uint8_t const * rn" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_index_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t 
pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_offset_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": 
"Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_index_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" 
+ } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_offset_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { 
+ "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[s64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svldnt1_gather_[s64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + 
"register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f16]_x2", + "arguments": [ + "svcount_t png", + "float16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + 
"Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f16]_x4", + "arguments": [ + "svcount_t png", + "float16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f32]_x2", + "arguments": [ + "svcount_t png", + "float32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f32]_x4", + "arguments": [ + "svcount_t png", + "float32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f64]_x2", + "arguments": [ + "svcount_t png", + "float64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f64]_x4", + "arguments": [ + "svcount_t png", + "float64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s16]_x2", + "arguments": [ + "svcount_t png", + "int16_t const * rn", + "int64_t vnum" + ], + "return_type": { + 
"value": "svint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s16]_x4", + "arguments": [ + "svcount_t png", + "int16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s32]_x2", + "arguments": [ + "svcount_t png", + "int32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s32]_x4", + "arguments": [ + "svcount_t png", + "int32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": 
[ + [ + "RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s64]_x2", + "arguments": [ + "svcount_t png", + "int64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s64]_x4", + "arguments": [ + "svcount_t png", + "int64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ], + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s8]_x2", + "arguments": [ + "svcount_t png", + "int8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": 
"svint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s8]_x4", + "arguments": [ + "svcount_t png", + "int8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u16]_x2", + "arguments": [ + "svcount_t png", + "uint16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u16]_x4", + "arguments": [ + "svcount_t png", + "uint16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u32]_x2", + "arguments": [ + "svcount_t png", + "uint32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u32]_x4", + "arguments": [ + "svcount_t png", + "uint32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u64]_x2", + "arguments": [ + "svcount_t png", + "uint64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": 
"svuint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u64]_x4", + "arguments": [ + "svcount_t png", + "uint64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ], + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u8]_x2", + "arguments": [ + "svcount_t png", + "uint8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u8]_x4", + "arguments": [ + "svcount_t png", + "uint8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"RDVL", + "MUL", + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { 
+ "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + 
"register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + 
"name": "svldnt1sh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + 
"pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { 
+ "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[s64]offset_s64", + "arguments": [ + 
"svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": 
"Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_index_u64", + 
"arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { 
+ "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + 
"Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" 
+ } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + 
"offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svldnt1uh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + 
"Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[s64]offset_u64", + "arguments": [ + 
"svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": 
"Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather[_u64base]_offset_s64", + 
"arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + 
"Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": 
"uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + 
"return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f32]_z", + "arguments": [ + "svbool_t pg", 
+ "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t 
op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { 
+ "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": 
[ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svlsl[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + 
"value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": 
"Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svlsl[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + 
"op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + 
"LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + 
"return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + 
"register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s16]_x", + 
"arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + 
"register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svlsl_wide[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { 
+ "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + 
"register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u16]_x", + "arguments": [ + "svbool_t pg", + 
"svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + 
"op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": 
"Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svlsr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + 
], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": 
"Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u8]_z", 
+ "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + 
"register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svlsr_wide[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + 
"register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u8]_z", + "arguments": [ + "svbool_t pg", 
+ "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f32]_m", + 
"arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": 
"Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + 
"svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + 
"MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": 
"Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s8]_z", + "arguments": [ + "svbool_t pg", + 
"svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + 
"MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": 
"Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s16]_z", + "arguments": [ + "svbool_t pg", + 
"svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + 
"MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" 
+ }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + 
"value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svmad[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmatch[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmatch[_s8]", + 
"arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmatch[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmatch[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": 
[ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f64]_m", + "arguments": [ + "svbool_t pg", + 
"svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + 
"register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svmax[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + 
], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + 
}, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + 
"MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + 
"uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + 
"register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + 
"MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": 
"svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ 
+ "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u32]_x", + "arguments": [ + "svbool_t pg", 
+ "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + 
"op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] 
+ ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + 
"svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + 
"op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + 
"MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f64]_m", + "arguments": [ + "svbool_t pg", + 
"svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnmv[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnmv[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnmv[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMV" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], 
+ "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" + ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" + ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" + ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" + ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": 
"Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" + ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" + ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s32]_x", + "arguments": [ + 
"svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + 
"pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svmaxp[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": 
"Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + 
"register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svmin[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svmin[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t 
op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + 
"register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + 
"uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { 
+ "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s16]_z", + "arguments": [ + 
"svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + 
"register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { 
+ "SIMD_ISA": "SVE", + "name": "svmin[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f16]_m", + "arguments": [ + "svbool_t pg", + 
"svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" 
+ ], + "instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svminnm[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f64]_z", + 
"arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnmv[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnmv[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnmv[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + 
"return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMV" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + ], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + ], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + ], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + 
], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + ], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + ], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": 
"svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + 
"return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { 
+ "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t 
op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svmla[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": 
"Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svmla[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": 
"Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], 
+ "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svmla[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], 
+ "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + 
}, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { 
+ "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svmla[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" 
+ ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + 
"register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + 
"svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svmla[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla_lane[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + 
"register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla_lane[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmla_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmla_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmla_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + 
"maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmla_lane[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmla_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmla_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + 
"return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALB" + ], + [ + "MOVPRFX", + "FMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALB" + ], + [ + "MOVPRFX", + "FMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + 
"register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", 
+ "name": "svmlalb[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { 
+ "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALB" + ], + [ + "MOVPRFX", + "FMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3", + 
"uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALT" + ], + [ + "MOVPRFX", + "FMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALT" + ], + [ + "MOVPRFX", + "FMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + 
"return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + 
"register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svmlalt[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALT" + ], + [ + "MOVPRFX", + "FMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t 
op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + 
"UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": 
{ + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f16]_z", + 
"arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + 
"return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, 
+ { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + 
"op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": 
{ + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svmls[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": 
"Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + 
"op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u16]_z", + "arguments": [ + "svbool_t pg", + 
"svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + 
], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + 
"register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls_lane[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + 
"imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls_lane[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + 
"svint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + 
}, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLB" + ], + [ + "MOVPRFX", + "FMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLB" + ], + [ + "MOVPRFX", + "FMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": 
"svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } 
+ }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_u64]", + 
"arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLB" + ], + [ + "MOVPRFX", + "FMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + 
] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLT" + ], + [ + "MOVPRFX", + "FMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLT" + ], + [ + "MOVPRFX", + "FMLSLT" + 
] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": 
"svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLT" + ], + [ + "MOVPRFX", + 
"FMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": 
"Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmmla[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMMLA" + ], + [ + "MOVPRFX", + "FMMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmmla[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMMLA" + ], + [ + "MOVPRFX", + "FMMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmmla[_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMMLA" + ], + [ + "MOVPRFX", + "SMMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmmla[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMMLA" + ], + [ + "MOVPRFX", + "UMMLA" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svmov[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_s16]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_s32]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_s64]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_u16]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_u32]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_u64]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlt[_s16]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlt[_s32]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlt[_s64]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlt[_u16]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlt[_u32]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlt[_u64]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": 
{ + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svmsb[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + 
"op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f64]_m", + 
"arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + 
"register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": 
"svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u16]_m", + 
"arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + 
[ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + 
"register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": 
"svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s32]_m", + 
"arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + 
"MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" 
+ } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + 
}, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + 
"svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svmsb[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svmul[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + 
"value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + 
"register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { 
+ "SIMD_ISA": "SVE", + "name": "svmul[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + 
"return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + 
"register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + 
}, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + 
"value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + 
"register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svmul[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + 
"MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, 
+ "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul_lane[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + 
"uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul_lane[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + 
"op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s8]_x", + "arguments": [ + 
"svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": 
{ + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u8]_z", + 
"arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ 
+ "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + 
"return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + 
"pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + 
{ + "SIMD_ISA": "SVE", + "name": "svmulh[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": 
"svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb_lane[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb_lane[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb_lane[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb_lane[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": 
"svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt_lane[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt_lane[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt_lane[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt_lane[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, 
+ "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": 
"Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svmulx[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + 
"return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnand[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + 
"pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NAND" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_u16]", + "arguments": [ + "svuint16_t 
op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + 
"register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + 
"name": "svnbsl[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": 
"Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + 
}, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + 
"register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + 
"register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s8]_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + 
"register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": 
"svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ] 
+ ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + 
"pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + 
"return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnmatch[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NMATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnmatch[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + 
"svint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NMATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnmatch[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NMATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnmatch[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NMATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + 
"pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": 
"svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svnmla[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": 
{ + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { 
+ "SIMD_ISA": "SVE", + "name": "svnmls[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": 
"Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + 
}, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svnmls[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + 
"register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": 
"svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svnmsb[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + 
"register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] 
+ }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnor[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + 
} + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s8]_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + 
] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ 
+ "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u8]_m", + "arguments": [ + "svuint8_t inactive", + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + 
"MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorn[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { 
+ "SIMD_ISA": "SVE", + "name": "svorr[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": 
"svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svorr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + 
"op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" 
+ ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t 
op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": 
"Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u8]_m", + "arguments": [ + 
"svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_s64]", + 
"arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + 
"name": "svpext_lane_c16", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c16_x2", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c32", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c32_x2", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c64", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c64_x2", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + 
}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c8", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c8_x2", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpfalse[_b]", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PFALSE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpfalse_c", + "arguments": [], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PFALSE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpfirst[_b]", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ptied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PFIRST" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmul[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmul[_u8]", + 
"arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + 
}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + 
], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpnext_b16", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ptied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PNEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpnext_b32", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + 
"Arguments_Preparation": { + "op": { + "register": "Ptied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PNEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpnext_b64", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ptied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PNEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpnext_b8", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ptied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PNEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb", + "arguments": [ + "svbool_t pg", + "const void *base", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather[_u32base]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather[_u32base]_offset", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather[_u64base]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather[_u64base]_offset", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather_[s32]offset", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint32_t offsets", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather_[s64]offset", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint64_t offsets", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather_[u32]offset", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint32_t offsets", + "enum svprfop op" + ], + 
"return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather_[u64]offset", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint64_t offsets", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_vnum", + "arguments": [ + "svbool_t pg", + "const void *base", + "int64_t vnum", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd", + "arguments": [ + "svbool_t pg", + "const void *base", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ], + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather[_u32base]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather[_u32base]_index", + 
"arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather[_u64base]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather[_u64base]_index", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather_[s32]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint32_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather_[s64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint64_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": 
"Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather_[u32]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint32_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather_[u64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint64_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_vnum", + "arguments": [ + "svbool_t pg", + "const void *base", + "int64_t vnum", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ], + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh", + "arguments": [ + "svbool_t pg", + "const void *base", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ], + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather[_u32base]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "enum svprfop op" + ], + 
"return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather[_u32base]_index", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather[_u64base]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather[_u64base]_index", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather_[s32]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint32_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svprfh_gather_[s64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint64_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather_[u32]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint32_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather_[u64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint64_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_vnum", + "arguments": [ + "svbool_t pg", + "const void *base", + "int64_t vnum", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ], + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw", + "arguments": [ + "svbool_t pg", + "const void *base", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": 
"Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ], + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather[_u32base]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather[_u32base]_index", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather[_u64base]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather[_u64base]_index", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather_[s32]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint32_t indices", + "enum svprfop op" 
+ ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather_[s64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint64_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather_[u32]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint32_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather_[u64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint64_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_vnum", + "arguments": [ + "svbool_t pg", + "const void *base", + "int64_t vnum", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], 
+ "instructions": [ + [ + "PRFW" + ], + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_b16", + "arguments": [ + "svbool_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_b32", + "arguments": [ + "svbool_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_b64", + "arguments": [ + "svbool_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_b8", + "arguments": [ + "svbool_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_c16", + "arguments": [ + "svcount_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } 
+ }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_c32", + "arguments": [ + "svcount_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_c64", + "arguments": [ + "svcount_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_c8", + "arguments": [ + "svcount_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptest_any", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "bool" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptest_first", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "bool" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptest_last", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "bool" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_b16", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + 
"Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_b32", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_b64", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_b8", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svptrue_c16", + "arguments": [], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svptrue_c32", + "arguments": [], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svptrue_c64", + "arguments": [], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svptrue_c8", + "arguments": [], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_pat_b16", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svptrue_pat_b32", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_pat_b64", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_pat_b8", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svqabs[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + 
}, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s8]_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svqadd[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ 
+ "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_s8]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": 
"svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ 
+ "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u32]_z", + "arguments": [ + "svbool_t pg", + 
"svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + 
"register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_s16]", + "arguments": [ + "svint16_t 
op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", 
+ "name": "svqadd[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": 
"Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, 
+ "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { 
+ "SIMD_ISA": "SVE2", + "name": "svqadd[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + 
}, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + 
"return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcadd[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCADD" + ], + [ + "MOVPRFX", + "SQCADD" 
+ ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcadd[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCADD" + ], + [ + "MOVPRFX", + "SQCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcadd[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCADD" + ], + [ + "MOVPRFX", + "SQCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcadd[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCADD" + ], + [ + "MOVPRFX", + "SQCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcvtn_s16[_s32_x2]", + "arguments": [ + "svint32x2_t zn" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "zn": { + "Z multi-vector": "{ Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCVTN" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcvtn_u16[_s32_x2]", + "arguments": [ + "svint32x2_t zn" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "zn": { + "Z multi-vector": "{ Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCVTUN" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcvtn_u16[_u32_x2]", + "arguments": [ + 
"svuint32x2_t zn" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "zn": { + "Z multi-vector": "{ Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQCVTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + 
"op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ 
+ "A64" + ], + "instructions": [ + [ + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_s64]", + "arguments": [ + "svint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECD" + ], + [ + "MOVPRFX", + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_u64]", + "arguments": [ + "svuint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ], + [ + "MOVPRFX", + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + 
[ + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_s64]", + "arguments": [ + "svint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECD" + ], + [ + "MOVPRFX", + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_u64]", + "arguments": [ + "svuint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + 
}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ], + [ + "MOVPRFX", + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_s16]", + "arguments": [ + "svint16_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ], + [ + "MOVPRFX", + "SQDECH" + ] + ] 
+ }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_u16]", + "arguments": [ + "svuint16_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECH" + ], + [ + "MOVPRFX", + "UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_s16]", + "arguments": [ + "svint16_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ], + [ + "MOVPRFX", + "SQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_u16]", + "arguments": [ + "svuint16_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECH" + ], + [ + "MOVPRFX", + "UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s32]_b16", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s32]_b32", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s32]_b64", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svqdecp[_n_s32]_b8", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s64]_b16", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s64]_b32", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s64]_b64", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s64]_b8", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u32]_b16", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u32]_b32", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u32]_b64", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u32]_b8", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u64]_b16", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u64]_b32", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u64]_b64", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" 
+ }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u64]_b8", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_s16]", + "arguments": [ + "svint16_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ], + [ + "MOVPRFX", + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_s32]", + "arguments": [ + "svint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ], + [ + "MOVPRFX", + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_s64]", + "arguments": [ + "svint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ], + [ + "MOVPRFX", + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_u16]", + "arguments": [ + "svuint16_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ], + [ + "MOVPRFX", + "UQDECP" + 
] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_u32]", + "arguments": [ + "svuint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ], + [ + "MOVPRFX", + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_u64]", + "arguments": [ + "svuint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ], + [ + "MOVPRFX", + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t 
imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_s32]", + "arguments": [ + "svint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ], + [ + "MOVPRFX", + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_u32]", + "arguments": [ + "svuint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ], + [ + "MOVPRFX", + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum 
svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_s32]", + "arguments": [ + "svint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ], + [ + "MOVPRFX", + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_u32]", + "arguments": [ + "svuint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ], + [ + "MOVPRFX", + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t 
op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalbt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalbt[_n_s32]", + "arguments": [ + "svint32_t op1", + 
"svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalbt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalbt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalbt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalbt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + 
[ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t 
op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslbt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t 
op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslbt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslbt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslbt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslbt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + 
}, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslbt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" 
+ ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svqdmlslt_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_n_s8]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_s16]", + "arguments": [ 
+ "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + 
"imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb_lane[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb_lane[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + 
[ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt_lane[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + 
}, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt_lane[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + 
"imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + 
"minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd[_s64]", + "arguments": [ + "svint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ], + [ + "MOVPRFX", + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd[_u64]", + "arguments": [ + "svuint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "UQINCD" + ], + [ + "MOVPRFX", + "UQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_s64]", + "arguments": [ + "svint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": 
"Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ], + [ + "MOVPRFX", + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_u64]", + "arguments": [ + "svuint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCD" + ], + [ + "MOVPRFX", + "UQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ 
+ "A64" + ], + "instructions": [ + [ + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_s16]", + "arguments": [ + "svint16_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCH" + ], + [ + "MOVPRFX", + "SQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_u16]", + "arguments": [ + "svuint16_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCH" + ], + [ + "MOVPRFX", + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } 
+ }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_s16]", + "arguments": [ + "svint16_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCH" + ], + [ + "MOVPRFX", + "SQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_u16]", + "arguments": [ + "svuint16_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCH" + ], + [ + "MOVPRFX", + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s32]_b16", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s32]_b32", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s32]_b64", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s32]_b8", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s64]_b16", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s64]_b32", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s64]_b64", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s64]_b8", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { 
+ "register": "Xtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u32]_b16", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u32]_b32", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u32]_b64", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u32]_b8", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u64]_b16", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u64]_b32", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + 
"return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u64]_b64", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u64]_b8", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_s16]", + "arguments": [ + "svint16_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ], + [ + "MOVPRFX", + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_s32]", + "arguments": [ + "svint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ], + [ + "MOVPRFX", + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_s64]", + "arguments": [ + "svint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "SQINCP" + ], + [ + "MOVPRFX", + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_u16]", + "arguments": [ + "svuint16_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ], + [ + "MOVPRFX", + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_u32]", + "arguments": [ + "svuint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ], + [ + "MOVPRFX", + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_u64]", + "arguments": [ + "svuint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ], + [ + "MOVPRFX", + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svqincw[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw[_s32]", + "arguments": [ + "svint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ], + [ + "MOVPRFX", + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw[_u32]", + "arguments": [ + "svuint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ], + [ + "MOVPRFX", + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_n_s64]", + 
"arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_s32]", + "arguments": [ + "svint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ], + [ + "MOVPRFX", + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_u32]", + "arguments": [ + "svuint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ], + [ + 
"MOVPRFX", + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s8]_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDCMLAH" + ], + [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDCMLAH" + ], + [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": 
"svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDCMLAH" + ], + [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDCMLAH" + ], + [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDCMLAH" + ], + [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDCMLAH" + ], + [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_n_s16]", + "arguments": [ + 
"svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": 
"Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": 
"Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", 
+ "name": "svqrdmlsh[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + 
"name": "svqrdmlsh_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_n_s8]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_s16]", 
+ "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + 
"Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": 
"Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" 
+ ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + 
], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQRSHL" + ], + 
[ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + 
], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": 
"Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "MOVPRFX", + 
"UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", 
+ "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrn[_n]_s16[_s32_x2]", + "arguments": [ + "svint32x2_t zn", + "uint64_t imm" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "zn": { + "Z multi-vector": "{ 
Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRN" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrn[_n]_u16[_u32_x2]", + "arguments": [ + "svuint32x2_t zn", + "uint64_t imm" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "zn": { + "Z multi-vector": "{ Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRN" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + 
"name": "svqrshrnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNT" + ] + ] + }, + { 
+ "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrun[_n]_u16[_s32_x2]", + "arguments": [ + "svint32x2_t zn", + "uint64_t imm" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "zn": { + "Z multi-vector": "{ Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUN" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrunb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "SQRSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrunb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrunb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrunt[_n_s16]", + "arguments": [ + "svuint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrunt[_n_s32]", + "arguments": [ + "svuint16_t even", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrunt[_n_s64]", + "arguments": [ + "svuint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + 
}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" 
+ ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] 
+ ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svqshl[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u32]_x", + "arguments": [ 
+ "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u64]_z", + 
"arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s16]_m", + 
"arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t 
op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + 
"register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "MOVPRFX", + 
"UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], 
+ "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } 
+ }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Zop1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s8]_z", + 
"arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + 
"Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint64_t imm2" + ], + 
"return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + 
], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunt[_n_s16]", + "arguments": [ + "svuint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunt[_n_s32]", + "arguments": [ + "svuint16_t even", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunt[_n_s64]", + "arguments": [ + "svuint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svqsub[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + 
[ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_s8]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": 
"svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": 
[ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u32]_z", + "arguments": [ + "svbool_t pg", + 
"svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + 
"register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_s16]", + "arguments": [ + 
"svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svqsub[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + 
"pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + 
"value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ 
+ "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": 
{ + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_u8]", + "arguments": [ + "svuint8_t 
op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s32]_z", + "arguments": [ + 
"svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + 
[ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + 
"svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + 
"op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + 
"MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + 
"value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svqsubr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_s32]", + "arguments": [ + 
"svint32_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op": { + "register": "Zop.S" 
+ } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunb[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunb[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": 
"Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunb[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunt[_s16]", + "arguments": [ + "svuint8_t even", + "svint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunt[_s32]", + "arguments": [ + "svuint16_t even", + "svint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunt[_s64]", + "arguments": [ + "svuint32_t even", + "svint64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svraddhnb[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + 
"register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + 
"uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + 
"name": "svraddhnt[_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrax1[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RAX1" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrax1[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RAX1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svrbit[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s64]_m", + "arguments": [ + "svint64_t inactive", + 
"svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s8]_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s8]_z", + "arguments": [ + 
"svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u32]_x", + 
"arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u8]_m", + "arguments": [ + "svuint8_t inactive", + "svbool_t pg", + 
"svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrdffr", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDFFR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrdffr_z", + "arguments": [ + "svbool_t pg" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDFFR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpe[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpe[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": 
"svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpe[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrecpe[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URECPE" + ], + [ + "MOVPRFX", + "URECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrecpe[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URECPE" + ], + [ + "MOVPRFX", + "URECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrecpe[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecps[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "FRECPS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecps[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecps[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svrecpx[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svreinterpret[_b]", + "arguments": [ + "svcount_t count" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svreinterpret[_c]", + "arguments": [ + "svbool_t pg" + ], + "return_type": { + "value": "svcount_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_s8]", + "arguments": [ + 
"svint8_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { 
+ "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svreinterpret_s16[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ 
+ "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_s32]", + "arguments": [ + "svint32_t op" + ], + 
"return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svreinterpret_s8[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + 
] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_s16]", + "arguments": [ + "svint16_t op" + ], + 
"return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svreinterpret_u64[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + 
"Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": 
"Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": 
"Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev_b16", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev_b32", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev_b64", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev_b8", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { 
+ "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": 
{ + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + 
"Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + 
"inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f16]_m", + "arguments": [ + "svfloat16_t zd", + "svbool_t pg", + "svfloat16_t zn" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t zn" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t zn" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": 
"Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f32]_m", + "arguments": [ + "svfloat32_t zd", + "svbool_t pg", + "svfloat32_t zn" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t zn" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t zn" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f64]_m", + "arguments": [ + "svfloat64_t zd", + "svbool_t pg", + "svfloat64_t zn" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t zn" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svrevd[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t zn" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s16]_m", + "arguments": [ + "svint16_t zd", + "svbool_t pg", + "svint16_t zn" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t zn" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t zn" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s32]_m", + "arguments": [ + "svint32_t zd", + "svbool_t pg", + "svint32_t zn" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t zn" + ], + "return_type": { + "value": 
"svint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t zn" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s64]_m", + "arguments": [ + "svint64_t zd", + "svbool_t pg", + "svint64_t zn" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t zn" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t zn" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s8]_m", + "arguments": [ + "svint8_t zd", + "svbool_t pg", + "svint8_t zn" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t zn" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t zn" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u16]_m", + "arguments": [ + "svuint16_t zd", + "svbool_t pg", + "svuint16_t zn" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t zn" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t zn" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u32]_m", + "arguments": [ + "svuint32_t zd", + "svbool_t pg", + 
"svuint32_t zn" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t zn" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t zn" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u64]_m", + "arguments": [ + "svuint64_t zd", + "svbool_t pg", + "svuint64_t zn" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t zn" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t zn" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + 
"register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u8]_m", + "arguments": [ + "svuint8_t zd", + "svbool_t pg", + "svuint8_t zn" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t zn" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t zn" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ 
+ [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": 
[ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { 
+ "SIMD_ISA": "SVE", + "name": "svrevw[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVW" + ], + [ + "MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevw[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVW" + ], + [ + "MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevw[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevw[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVW" + ], + [ + "MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevw[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVW" + ], + [ + 
"MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevw[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { 
+ "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svrhadd[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + 
}, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + 
}, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] 
+ ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + 
"return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { 
+ "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + 
}, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { 
+ "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + 
"register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": 
"svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f64]_z", + "arguments": [ + "svbool_t pg", + 
"svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svrinti[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f16]_m", + 
"arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + 
"FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": 
"Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + 
"register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + 
"register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + 
}, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + 
"Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + 
"return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": 
"Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + 
}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], 
+ "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" 
+ ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + 
"URSHR" + ], + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + 
"URSHR" + ], + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s32]_x", + "arguments": [ + "svbool_t pg", + 
"svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + 
}, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "MOVPRFX", + 
"URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t 
op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + 
}, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svrshr[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + 
"minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + 
"MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + 
"Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + 
"value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": 
"Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrte[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrte[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrte[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsqrte[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSQRTE" + ], + [ + "MOVPRFX", + "URSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + 
"name": "svrsqrte[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSQRTE" + ], + [ + "MOVPRFX", + "URSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsqrte[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrts[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrts[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrts[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + 
"Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSRA" + ], + [ + "MOVPRFX", + "SRSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSRA" + ], + [ + "MOVPRFX", + "SRSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSRA" + ], + [ + "MOVPRFX", + "SRSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSRA" + ], + [ + "MOVPRFX", + "SRSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" 
+ } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSRA" + ], + [ + "MOVPRFX", + "URSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSRA" + ], + [ + "MOVPRFX", + "URSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSRA" + ], + [ + "MOVPRFX", + "URSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSRA" + ], + [ + "MOVPRFX", + "URSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { 
+ "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + 
"Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_u64]", + "arguments": [ + 
"svuint32_t even", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + 
] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLB" + ], + [ + "MOVPRFX", + "SBCLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLB" + ], + [ + "MOVPRFX", + "SBCLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLB" + ], + [ + "MOVPRFX", + "SBCLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLB" + ], + [ + "MOVPRFX", + "SBCLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclt[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLT" + ], + [ + "MOVPRFX", + "SBCLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclt[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLT" + ], + [ + "MOVPRFX", + "SBCLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclt[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"SBCLT" + ], + [ + "MOVPRFX", + "SBCLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclt[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLT" + ], + [ + "MOVPRFX", + "SBCLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": 
"svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "int32_t op2" + ], + "return_type": { + "value": 
"svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + 
], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_b]", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, 
+ "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + 
"pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svset2[_b]", + "arguments": [ + "svboolx2_t tuple", + "uint64_t imm_index", + "svbool_t x" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_f16]", + "arguments": [ + "svfloat16x2_t tuple", + "uint64_t imm_index", + "svfloat16_t x" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_f32]", + "arguments": [ + "svfloat32x2_t tuple", + "uint64_t imm_index", + "svfloat32_t x" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { 
+ "SIMD_ISA": "SVE", + "name": "svset2[_f64]", + "arguments": [ + "svfloat64x2_t tuple", + "uint64_t imm_index", + "svfloat64_t x" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_s16]", + "arguments": [ + "svint16x2_t tuple", + "uint64_t imm_index", + "svint16_t x" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_s32]", + "arguments": [ + "svint32x2_t tuple", + "uint64_t imm_index", + "svint32_t x" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_s64]", + "arguments": [ + "svint64x2_t tuple", + "uint64_t imm_index", + "svint64_t x" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_s8]", + "arguments": [ + "svint8x2_t tuple", + "uint64_t imm_index", + "svint8_t x" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_u16]", + "arguments": [ + "svuint16x2_t tuple", + "uint64_t imm_index", + "svuint16_t x" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_u32]", + "arguments": [ + "svuint32x2_t tuple", + "uint64_t imm_index", + "svuint32_t x" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_u64]", + "arguments": [ + "svuint64x2_t tuple", + "uint64_t imm_index", + "svuint64_t x" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_u8]", + "arguments": [ + "svuint8x2_t tuple", + "uint64_t imm_index", + "svuint8_t x" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Architectures": [ + 
"A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_f16]", + "arguments": [ + "svfloat16x3_t tuple", + "uint64_t imm_index", + "svfloat16_t x" + ], + "return_type": { + "value": "svfloat16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_f32]", + "arguments": [ + "svfloat32x3_t tuple", + "uint64_t imm_index", + "svfloat32_t x" + ], + "return_type": { + "value": "svfloat32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_f64]", + "arguments": [ + "svfloat64x3_t tuple", + "uint64_t imm_index", + "svfloat64_t x" + ], + "return_type": { + "value": "svfloat64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_s16]", + "arguments": [ + "svint16x3_t tuple", + "uint64_t imm_index", + "svint16_t x" + ], + "return_type": { + "value": "svint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_s32]", + "arguments": [ + "svint32x3_t tuple", + "uint64_t imm_index", + "svint32_t x" + ], + "return_type": { + "value": "svint32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_s64]", + "arguments": [ + "svint64x3_t tuple", + "uint64_t imm_index", + "svint64_t x" + ], + "return_type": { + "value": "svint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_s8]", + "arguments": [ + "svint8x3_t tuple", + "uint64_t imm_index", + "svint8_t x" + ], + "return_type": { + "value": "svint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_u16]", + "arguments": [ + "svuint16x3_t tuple", + "uint64_t imm_index", + "svuint16_t x" + ], + "return_type": { + "value": "svuint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_u32]", + "arguments": [ + "svuint32x3_t tuple", + "uint64_t imm_index", + "svuint32_t x" + ], + "return_type": { + "value": "svuint32x3_t" 
+ }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_u64]", + "arguments": [ + "svuint64x3_t tuple", + "uint64_t imm_index", + "svuint64_t x" + ], + "return_type": { + "value": "svuint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_u8]", + "arguments": [ + "svuint8x3_t tuple", + "uint64_t imm_index", + "svuint8_t x" + ], + "return_type": { + "value": "svuint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svset4[_b]", + "arguments": [ + "svboolx4_t tuple", + "uint64_t imm_index", + "svbool_t x" + ], + "return_type": { + "value": "svboolx4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_f16]", + "arguments": [ + "svfloat16x4_t tuple", + "uint64_t imm_index", + "svfloat16_t x" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_f32]", + "arguments": [ + "svfloat32x4_t tuple", + "uint64_t imm_index", + "svfloat32_t x" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_f64]", + "arguments": [ + "svfloat64x4_t tuple", + "uint64_t imm_index", + "svfloat64_t x" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_s16]", + "arguments": [ + "svint16x4_t tuple", + "uint64_t imm_index", + "svint16_t x" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_s32]", + "arguments": [ + "svint32x4_t tuple", + "uint64_t imm_index", + "svint32_t x" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_s64]", + "arguments": [ + "svint64x4_t tuple", + "uint64_t imm_index", + "svint64_t x" + ], + "return_type": { + 
"value": "svint64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_s8]", + "arguments": [ + "svint8x4_t tuple", + "uint64_t imm_index", + "svint8_t x" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_u16]", + "arguments": [ + "svuint16x4_t tuple", + "uint64_t imm_index", + "svuint16_t x" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_u32]", + "arguments": [ + "svuint32x4_t tuple", + "uint64_t imm_index", + "svuint32_t x" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_u64]", + "arguments": [ + "svuint64x4_t tuple", + "uint64_t imm_index", + "svuint64_t x" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_u8]", + "arguments": [ + "svuint8x4_t tuple", + "uint64_t imm_index", + "svuint8_t x" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsetffr", + "arguments": [], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SETFFR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_s16]", + "arguments": [ + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_s32]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 
15 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_s64]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllt[_n_s16]", + "arguments": [ + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svshllt[_n_s32]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllt[_n_s64]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllt[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllt[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllt[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + 
"minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": 
"SVE2", + "name": "svshrnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + 
"register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + 
"imm3": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsm4e[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" 
+ ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SM4E" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsm4ekey[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SM4EKEY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svsplice[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" 
+ }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, 
+ { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + 
"op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u8]_x", + 
"arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + 
"register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u64]_x", + "arguments": [ + "svbool_t pg", + 
"svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, 
+ "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, 
+ "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + 
"register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSRA" + ], + [ + "MOVPRFX", + "SSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSRA" + ], + [ + "MOVPRFX", + "SSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSRA" + ], + [ + "MOVPRFX", + "SSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSRA" + ], + [ + "MOVPRFX", + "SSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USRA" + ], + [ + "MOVPRFX", + 
"USRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USRA" + ], + [ + "MOVPRFX", + "USRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USRA" + ], + [ + "MOVPRFX", + "USRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USRA" + ], + [ + "MOVPRFX", + "USRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + 
"Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_u64]", + "arguments": [ + "svuint64_t 
op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "svfloat16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f16_x2]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "svfloat16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f16_x4]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "svfloat16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ 
+ [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f32_x2]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "svfloat32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f32_x4]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "svfloat32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f64_x2]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "svfloat64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, 
+ "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f64_x4]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "svfloat64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s16_x2]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "svint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s16_x4]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "svint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + 
"Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s32_x2]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "svint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s32_x4]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "svint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s64_x2]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "svint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s64_x4]", + "arguments": [ + 
"svcount_t png", + "int64_t * rn", + "svint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s8_x2]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "svint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s8_x4]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "svint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + 
"ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u16_x2]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "svuint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u16_x4]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "svuint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u32_x2]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "svuint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u32_x4]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "svuint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { 
+ "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u64_x2]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "svuint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u64_x4]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "svuint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u8_x2]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "svuint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + 
"Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u8_x4]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "svuint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base]_index[_f32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base]_index[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base]_index[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + 
"register": "Zdata.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base]_offset[_f32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base]_offset[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base_f32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": 
"Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_index[_f64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": 
[ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_index[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_offset[_f64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base_f64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]index[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svint32_t indices", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]index[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32_t indices", + 
"svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]index[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint32_t indices", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svint32_t offsets", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint32_t offsets", + "svuint32_t data" + ], + "return_type": { 
+ "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]index[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svint64_t indices", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svint64_t offsets", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + 
"Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u32]index[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svuint32_t indices", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u32]index[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint32_t indices", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + 
"register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u32]index[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32_t indices", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svuint32_t offsets", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { 
+ "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]index[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svuint64_t indices", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svuint64_t offsets", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + 
"offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "int64_t vnum", + "svfloat16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f16_x2]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "int64_t vnum", + "svfloat16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z 
multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f16_x4]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "int64_t vnum", + "svfloat16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "int64_t vnum", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f32_x2]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "int64_t vnum", + "svfloat32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f32_x4]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "int64_t vnum", + "svfloat32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + 
}, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "int64_t vnum", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f64_x2]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "int64_t vnum", + "svfloat64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f64_x4]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "int64_t vnum", + "svfloat64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": 
"Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s16_x2]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "int64_t vnum", + "svint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s16_x4]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "int64_t vnum", + "svint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "int64_t vnum", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s32_x2]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "int64_t vnum", + "svint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + 
}, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s32_x4]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "int64_t vnum", + "svint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "int64_t vnum", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s64_x2]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "int64_t vnum", + "svint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s64_x4]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "int64_t vnum", + "svint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + 
}, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s8_x2]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "int64_t vnum", + "svint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s8_x4]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "int64_t vnum", + "svint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + 
"vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u16_x2]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "int64_t vnum", + "svuint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u16_x4]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "int64_t vnum", + "svuint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u32_x2]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "int64_t vnum", + "svuint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + 
"zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u32_x4]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "int64_t vnum", + "svuint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "int64_t vnum", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u64_x2]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "int64_t vnum", + "svuint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u64_x4]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "int64_t vnum", + "svuint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": 
"Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u8_x2]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "int64_t vnum", + "svuint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u8_x4]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "int64_t vnum", + "svuint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_s16]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_s32]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_u16]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_u32]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + 
}, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u32base]_offset[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[s32]offset[_s32]", + 
"arguments": [ + "svbool_t pg", + "int8_t *base", + "svint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[s32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int8_t 
*base", + "svuint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint16_t data" + ], + 
"return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint32_t data" + ], + "return_type": { + "value": "void" 
+ }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base]_index[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base]_index[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base]_offset[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + 
"ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base]_index[_u64]", + "arguments": [ + "svbool_t pg", 
+ "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], 
+ "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s32]index[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint32_t indices", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s32]index[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint32_t indices", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + 
"register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + 
"register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u32]index[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint32_t indices", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u32]index[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint32_t indices", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + 
"offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": 
"Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" 
+ } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter[_u64base]_index[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svst1w_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint64_t indices", + 
"svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { 
+ "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "int64_t vnum", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + 
"base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "svfloat16x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], + [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svfloat32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svfloat64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + 
], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint16x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], + [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint8x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B, Zdata1.B}" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2B" + ], + [ + "ST2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint16x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": 
{ + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], + [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint8x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B, Zdata1.B}" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2B" + ], + [ + "ST2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "int64_t vnum", + "svfloat16x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], 
+ [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "int64_t vnum", + "svfloat32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "int64_t vnum", + "svfloat64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint16x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], + [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "int64_t vnum", + "svint32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "int64_t vnum", + "svint64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint8x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B, Zdata1.B}" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2B" + ], + [ + "ST2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint16x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], + [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "int64_t vnum", + "svuint64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint8x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B, Zdata1.B}" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2B" + ], + [ + "ST2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "svfloat16x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svfloat32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ + "ST3W" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svst3[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svfloat64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint16x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ + "ST3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint8x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata2.B}" 
+ }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3B" + ], + [ + "ST3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint16x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ + "ST3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint8x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata2.B}" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3B" + ], + [ + "ST3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "int64_t 
vnum", + "svfloat16x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "int64_t vnum", + "svfloat32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ + "ST3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "int64_t vnum", + "svfloat64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint16x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_s32]", + "arguments": [ + "svbool_t 
pg", + "int32_t *base", + "int64_t vnum", + "svint32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ + "ST3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "int64_t vnum", + "svint64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint8x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata2.B}" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3B" + ], + [ + "ST3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint16x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_u32]", + 
"arguments": [ + "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ + "ST3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "int64_t vnum", + "svuint64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint8x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata2.B}" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3B" + ], + [ + "ST3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "svfloat16x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t 
*base", + "svfloat32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svfloat64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4D" + ], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint16x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "ST4D" + ], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint8x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata3.B}" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4B" + ], + [ + "ST4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint16x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4D" + ], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint8x4_t data" + ], + "return_type": { + "value": "void" + }, + 
"Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata3.B}" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4B" + ], + [ + "ST4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "int64_t vnum", + "svfloat16x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "int64_t vnum", + "svfloat32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "int64_t vnum", + "svfloat64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4D" + ], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint16x4_t data" + ], + "return_type": { + "value": "void" + }, + 
"Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "int64_t vnum", + "svint32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "int64_t vnum", + "svint64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4D" + ], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint8x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata3.B}" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4B" + ], + [ + "ST4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint16x4_t data" + ], + "return_type": { + 
"value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "int64_t vnum", + "svuint64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4D" + ], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint8x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata3.B}" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4B" + ], + [ + "ST4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "svfloat16_t data" + ], + 
"return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f16_x2]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "svfloat16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f16_x4]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "svfloat16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f32_x2]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "svfloat32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] 
+ }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f32_x4]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "svfloat32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f64_x2]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "svfloat64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f64_x4]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "svfloat64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, 
+ "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s16_x2]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "svint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s16_x4]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "svint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s32_x2]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "svint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s32_x4]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "svint32x4_t zt" + ], + "return_type": { + 
"value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s64_x2]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "svint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s64_x4]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "svint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ], + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svstnt1[_s8_x2]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "svint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s8_x4]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "svint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u16_x2]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "svuint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u16_x4]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "svuint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + 
} + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u32_x2]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "svuint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u32_x4]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "svuint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u64_x2]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "svuint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { 
+ "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u64_x4]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "svuint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ], + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u8_x2]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "svuint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u8_x4]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "svuint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_index[_f32]", + 
"arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_index[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_index[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_offset[_f32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_offset[_s32]", 
+ "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base_f32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + 
"bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_index[_f64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_index[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_offset[_f64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + 
"data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base_f64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[s64]index[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svint64_t indices", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": 
"SVE2", + "name": "svstnt1_scatter_[s64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svint64_t offsets", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svuint32_t offsets", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + 
"name": "svstnt1_scatter_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u64]index[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svuint64_t indices", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svstnt1_scatter_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svuint64_t offsets", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svstnt1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "int64_t vnum", + "svfloat16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f16_x2]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "int64_t vnum", + "svfloat16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f16_x4]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "int64_t vnum", + "svfloat16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "int64_t vnum", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f32_x2]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "int64_t vnum", + "svfloat32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f32_x4]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "int64_t vnum", + "svfloat32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "int64_t vnum", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f64_x2]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "int64_t vnum", + "svfloat64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + 
"MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f64_x4]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "int64_t vnum", + "svfloat64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s16_x2]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "int64_t vnum", + "svint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s16_x4]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "int64_t vnum", + "svint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "int64_t vnum", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s32_x2]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "int64_t vnum", + "svint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s32_x4]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "int64_t vnum", + "svint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "int64_t vnum", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" 
+ ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s64_x2]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "int64_t vnum", + "svint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s64_x4]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "int64_t vnum", + "svint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ], + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s8_x2]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "int64_t vnum", + "svint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s8_x4]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "int64_t vnum", + "svint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u16_x2]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "int64_t vnum", + "svuint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u16_x4]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "int64_t vnum", + "svuint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - 
Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u32_x2]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "int64_t vnum", + "svuint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u32_x4]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "int64_t vnum", + "svuint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "int64_t vnum", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * 
svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u64_x2]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "int64_t vnum", + "svuint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u64_x4]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "int64_t vnum", + "svuint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ], + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u8_x2]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "int64_t vnum", + "svuint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + 
}, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u8_x4]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "int64_t vnum", + "svuint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u32base]_offset[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + 
"register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svstnt1b_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svuint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint8_t 
*base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base]_index[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base]_index[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t 
bases", + "int64_t index", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base]_offset[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + 
"Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u64base]_index[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + 
"register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + 
}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base]_index[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svstnt1w_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svstnt1w_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t 
op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" 
+ }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + 
[ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svsub[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svsub[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" 
+ }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t 
op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } 
+ }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u32]_x", + "arguments": [ + "svbool_t 
pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": 
"Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { 
+ "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] 
+ ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": 
{ + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t 
op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svsubhnt[_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + 
"value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svsublb[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + 
"op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t 
op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ 
+ [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "MOVPRFX", 
+ "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f32]_z", + "arguments": [ + 
"svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + 
"op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + 
"register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svsubr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" 
+ ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svsubr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ 
+ "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + 
"svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": 
"Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u32]_m", + "arguments": [ + 
"svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + 
"register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": 
"SVE2", + "name": "svsubwb[_n_s16]", + "arguments": [ + "svint16_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_n_s32]", + "arguments": [ + "svint32_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_n_s64]", + "arguments": [ + "svint64_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": 
{ + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2" + ], + 
"return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_n_s16]", + "arguments": [ + "svint16_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_n_s32]", + "arguments": [ + "svint32_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_n_s64]", + "arguments": [ + "svint64_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsudot[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USDOT" + ], + [ + "MOVPRFX", + "USDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsudot[_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USDOT" + ], + [ + "MOVPRFX", + "USDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsudot_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "svuint8_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUDOT" + ], + [ + "MOVPRFX", + "SUDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_f16]", + 
"arguments": [ + "svfloat16x2_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_f32]", + "arguments": [ + "svfloat32x2_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_f64]", + "arguments": [ + "svfloat64x2_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_s16]", + "arguments": [ + "svint16x2_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_s32]", + "arguments": [ + "svint32x2_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_s64]", + "arguments": [ + "svint64x2_t data", + "svuint64_t indices" + ], + "return_type": { + 
"value": "svint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_s8]", + "arguments": [ + "svint8x2_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.B, Zdata1.B}" + }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_u16]", + "arguments": [ + "svuint16x2_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_u32]", + "arguments": [ + "svuint32x2_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_u64]", + "arguments": [ + "svuint64x2_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_u8]", + "arguments": [ + "svuint8x2_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.B, Zdata1.B}" 
+ }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_f16]", + "arguments": [ + "svfloat16_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_f32]", + "arguments": [ + "svfloat32_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_f64]", + "arguments": [ + "svfloat64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_s16]", + "arguments": [ + "svint16_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_s32]", + "arguments": [ + "svint32_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svtbl[_s64]", + "arguments": [ + "svint64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_s8]", + "arguments": [ + "svint8_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_u16]", + "arguments": [ + "svuint16_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_u32]", + "arguments": [ + "svuint32_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_u64]", + "arguments": [ + "svuint64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_u8]", + "arguments": [ + "svuint8_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": 
"Zdata.B" + }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_f16]", + "arguments": [ + "svfloat16_t fallback", + "svfloat16_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Ztied.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_f32]", + "arguments": [ + "svfloat32_t fallback", + "svfloat32_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Ztied.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_f64]", + "arguments": [ + "svfloat64_t fallback", + "svfloat64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Ztied.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_s16]", + "arguments": [ + "svint16_t fallback", + "svint16_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Ztied.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_s32]", + "arguments": [ + "svint32_t fallback", + "svint32_t 
data", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Ztied.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_s64]", + "arguments": [ + "svint64_t fallback", + "svint64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Ztied.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_s8]", + "arguments": [ + "svint8_t fallback", + "svint8_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Ztied.B" + }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_u16]", + "arguments": [ + "svuint16_t fallback", + "svuint16_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Ztied.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_u32]", + "arguments": [ + "svuint32_t fallback", + "svuint32_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Ztied.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_u64]", + "arguments": [ + "svuint64_t fallback", + "svuint64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Ztied.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_u8]", + "arguments": [ + "svuint8_t fallback", + "svuint8_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Ztied.B" + }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtmad[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTMAD" + ], + [ + "MOVPRFX", + "FTMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtmad[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTMAD" + ], + [ + "MOVPRFX", + "FTMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtmad[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm3" + ], + "return_type": { 
+ "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTMAD" + ], + [ + "MOVPRFX", + "FTMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1_b16", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1_b64", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1_b8", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_f32]", + "arguments": [ + "svfloat32_t 
op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + 
"return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2_b16", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svtrn2_b64", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2_b8", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtsmul[_f16]", + "arguments": [ + "svfloat16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtsmul[_f32]", + "arguments": [ + "svfloat32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtsmul[_f64]", + "arguments": [ + "svfloat64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtssel[_f16]", + "arguments": [ + "svfloat16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svtssel[_f32]", + "arguments": [ + "svfloat32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtssel[_f64]", + "arguments": [ + "svfloat64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svundef2_b", + "arguments": [], + "return_type": { + "value": "svboolx2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_f16", + "arguments": [], + "return_type": { + "value": "svfloat16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_f32", + "arguments": [], + "return_type": { + "value": "svfloat32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_f64", + "arguments": [], + "return_type": { + "value": "svfloat64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_s16", + "arguments": [], + "return_type": { + "value": "svint16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_s32", + "arguments": [], + "return_type": { + "value": "svint32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_s64", + "arguments": [], + "return_type": { + "value": "svint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_s8", + "arguments": [], + "return_type": { + "value": "svint8x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_u16", + 
"arguments": [], + "return_type": { + "value": "svuint16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_u32", + "arguments": [], + "return_type": { + "value": "svuint32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_u64", + "arguments": [], + "return_type": { + "value": "svuint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_u8", + "arguments": [], + "return_type": { + "value": "svuint8x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_f16", + "arguments": [], + "return_type": { + "value": "svfloat16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_f32", + "arguments": [], + "return_type": { + "value": "svfloat32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_f64", + "arguments": [], + "return_type": { + "value": "svfloat64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_s16", + "arguments": [], + "return_type": { + "value": "svint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_s32", + "arguments": [], + "return_type": { + "value": "svint32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_s64", + "arguments": [], + "return_type": { + "value": "svint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_s8", + "arguments": [], + "return_type": { + "value": "svint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_u16", + "arguments": [], + "return_type": { + "value": "svuint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_u32", + "arguments": [], + "return_type": { + "value": "svuint32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { 
+ "SIMD_ISA": "SVE", + "name": "svundef3_u64", + "arguments": [], + "return_type": { + "value": "svuint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_u8", + "arguments": [], + "return_type": { + "value": "svuint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svundef4_b", + "arguments": [], + "return_type": { + "value": "svboolx4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_f16", + "arguments": [], + "return_type": { + "value": "svfloat16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_f32", + "arguments": [], + "return_type": { + "value": "svfloat32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_f64", + "arguments": [], + "return_type": { + "value": "svfloat64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_s16", + "arguments": [], + "return_type": { + "value": "svint16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_s32", + "arguments": [], + "return_type": { + "value": "svint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_s64", + "arguments": [], + "return_type": { + "value": "svint64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_s8", + "arguments": [], + "return_type": { + "value": "svint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_u16", + "arguments": [], + "return_type": { + "value": "svuint16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_u32", + "arguments": [], + "return_type": { + "value": "svuint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_u64", + "arguments": [], + "return_type": { + "value": "svuint64x4_t" 
+ }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_u8", + "arguments": [], + "return_type": { + "value": "svuint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_f16", + "arguments": [], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_f32", + "arguments": [], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_f64", + "arguments": [], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_s16", + "arguments": [], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_s32", + "arguments": [], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_s64", + "arguments": [], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_s8", + "arguments": [], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_u16", + "arguments": [], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_u32", + "arguments": [], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_u64", + "arguments": [], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_u8", + "arguments": [], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_b]", + "arguments": [ + "svbool_t op" + ], + 
"return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_s16]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_s32]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_s64]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_u16]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_u32]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_u64]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svunpklo[_b]", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_s16]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_s32]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_s64]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_u16]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_u32]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_u64]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UUNPKLO" + ] 
+ ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s8]_m", 
+ "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + 
"register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + 
"svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svusdot[_n_s32]", + "arguments": [ + "svint32_t op1", + "svuint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USDOT" + ], + [ + "MOVPRFX", + "USDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svusdot[_s32]", + "arguments": [ + "svint32_t op1", + "svuint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USDOT" + ], + [ + "MOVPRFX", + "USDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svusdot_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svuint8_t op2", + "svint8_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USDOT" + ], + [ + "MOVPRFX", + "USDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svusmmla[_s32]", + "arguments": [ + "svint32_t op1", + "svuint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USMMLA" + ], + [ + "MOVPRFX", + "USMMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_u8]", + "arguments": 
[ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1_b16", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1_b64", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1_b8", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] 
+ ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { 
+ "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + 
"return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svuzp2[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2_b16", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2_b64", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2_b8", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_u64]", + 
"arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b16[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b16[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b16[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b16[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + 
], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b16[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b16[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b64[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b64[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b64[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svwhilege_b64[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b64[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b64[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b8[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b8[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b8[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b8[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b8[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b8[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c16[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c16[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c32[_s64]", + 
"arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c32[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c64[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c64[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c8[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c8[_u64]", + 
"arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b16[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b16[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b16[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b16[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b16[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": 
"Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b16[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + 
"value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b64[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b64[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b64[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b64[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": 
"SVE2", + "name": "svwhilegt_b64[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b64[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b8[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b8[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b8[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b8[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + 
} + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b8[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b8[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c16[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c16[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c32[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svwhilegt_c32[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c64[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c64[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c8[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c8[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svwhilele_b16[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b16[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b16[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b16[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b16[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b16[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + 
"register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b32[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b32[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b32[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b32[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b32[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b32[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": 
{ + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b64[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b64[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b64[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b64[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b64[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", 
+ "name": "svwhilele_b64[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b8[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b8[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b8[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b8[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b8[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b8[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c16[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c16[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c32[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c32[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c64[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c64[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c8[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c8[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b16[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svwhilelt_b16[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b16[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b16[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b16[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b16[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b32[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + 
"register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b32[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b32[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b32[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b32[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b32[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b64[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": 
{ + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b64[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b64[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b64[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b64[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b64[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svwhilelt_b8[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b8[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b8[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b8[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b8[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b8[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + 
}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c16[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c16[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c32[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c32[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c64[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + 
}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c64[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c8[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c8[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_f16]", + "arguments": [ + "const float16_t *op1", + "const float16_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_f32]", + "arguments": [ + "const float32_t *op1", + "const float32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" 
+ ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_f64]", + "arguments": [ + "const float64_t *op1", + "const float64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_s16]", + "arguments": [ + "const int16_t *op1", + "const int16_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_s32]", + "arguments": [ + "const int32_t *op1", + "const int32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_s64]", + "arguments": [ + "const int64_t *op1", + "const int64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_s8]", + "arguments": [ + "const int8_t *op1", + "const int8_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_u16]", + "arguments": [ + "const uint16_t *op1", + "const uint16_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_u32]", + "arguments": [ + "const uint32_t *op1", + "const uint32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_u64]", + "arguments": [ + "const uint64_t *op1", + "const uint64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_u8]", + "arguments": [ + "const uint8_t *op1", + "const uint8_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_f16]", + "arguments": [ + "const float16_t *op1", + "const float16_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_f32]", + "arguments": [ + "const float32_t *op1", + "const float32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, 
+ { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_f64]", + "arguments": [ + "const float64_t *op1", + "const float64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_s16]", + "arguments": [ + "const int16_t *op1", + "const int16_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_s32]", + "arguments": [ + "const int32_t *op1", + "const int32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_s64]", + "arguments": [ + "const int64_t *op1", + "const int64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_s8]", + "arguments": [ + "const int8_t *op1", + "const int8_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_u16]", + "arguments": [ + "const uint16_t *op1", + "const uint16_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": 
{ + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_u32]", + "arguments": [ + "const uint32_t *op1", + "const uint32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_u64]", + "arguments": [ + "const uint64_t *op1", + "const uint64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_u8]", + "arguments": [ + "const uint8_t *op1", + "const uint8_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwrffr", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WRFFR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] 
+ }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_u32]", + "arguments": 
[ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": 
"Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, 
+ "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1_b16", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1_b64", + "arguments": [ + 
"svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1_b8", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_s32]", + "arguments": [ + "svint32_t op1", + 
"svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] 
+ }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2_b16", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2_b64", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2_b8", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaba_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b", + "int16x4_t c" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABA" + ] + ] + }, + 
{ + "SIMD_ISA": "Neon", + "name": "vaba_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b", + "int32x2_t c" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaba_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b", + "int8x8_t c" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaba_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b", + "uint16x4_t c" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaba_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b", + "uint32x2_t c" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaba_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b", + "uint8x8_t c" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ 
+ [ + "UABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_high_s16", + "arguments": [ + "int32x4_t a", + "int16x8_t b", + "int16x8_t c" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABAL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_high_s32", + "arguments": [ + "int64x2_t a", + "int32x4_t b", + "int32x4_t c" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABAL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_high_s8", + "arguments": [ + "int16x8_t a", + "int8x16_t b", + "int8x16_t c" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABAL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_high_u16", + "arguments": [ + "uint32x4_t a", + "uint16x8_t b", + "uint16x8_t c" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABAL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_high_u32", + "arguments": [ + "uint64x2_t a", + "uint32x4_t b", + "uint32x4_t c" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"UABAL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_high_u8", + "arguments": [ + "uint16x8_t a", + "uint8x16_t b", + "uint8x16_t c" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABAL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_s16", + "arguments": [ + "int32x4_t a", + "int16x4_t b", + "int16x4_t c" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABAL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_s32", + "arguments": [ + "int64x2_t a", + "int32x2_t b", + "int32x2_t c" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABAL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_s8", + "arguments": [ + "int16x8_t a", + "int8x8_t b", + "int8x8_t c" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABAL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_u16", + "arguments": [ + "uint32x4_t a", + "uint16x4_t b", + "uint16x4_t c" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" 
+ ], + "instructions": [ + [ + "UABAL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_u32", + "arguments": [ + "uint64x2_t a", + "uint32x2_t b", + "uint32x2_t c" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABAL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_u8", + "arguments": [ + "uint16x8_t a", + "uint8x8_t b", + "uint8x8_t c" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABAL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabaq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b", + "int16x8_t c" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabaq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b", + "int32x4_t c" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabaq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b", + "int8x16_t c" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + 
"Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabaq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b", + "uint16x8_t c" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabaq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabaq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b", + "uint8x16_t c" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + 
"instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" 
+ }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdd_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdh_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_high_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_high_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_high_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_high_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_high_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_high_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_s8", + "arguments": [ 
+ "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + 
"A32", + "A64" + ], + "instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabds_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabs_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabs_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabs_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabs_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", 
+ "A32", + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabs_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabs_s64", + "arguments": [ + "int64x1_t a" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabs_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsd_s64", + "arguments": [ + "int64_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsh_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsq_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsq_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } 
+ }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsq_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsq_s64", + "arguments": [ + "int64x2_t a" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_f32", + "arguments": [ + "float32x2_t 
a", + "float32x2_t b" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_p16", + "arguments": [ + "poly16x4_t a", + "poly16x4_t b" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_p64", + "arguments": [ + "poly64x1_t a", + "poly64x1_t b" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_p8", + "arguments": [ + "poly8x8_t a", + "poly8x8_t b" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + 
[ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + 
"register": "Dm" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddd_s64", + "arguments": [ + "int64_t a", + "int64_t b" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddd_u64", + "arguments": [ + "uint64_t a", + "uint64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddh_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_high_s16", + "arguments": [ + "int8x8_t r", + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_high_s32", + "arguments": [ + "int16x4_t r", + "int32x4_t a", + "int32x4_t b" + ], + "return_type": 
{ + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_high_s64", + "arguments": [ + "int32x2_t r", + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_high_u16", + "arguments": [ + "uint8x8_t r", + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_high_u32", + "arguments": [ + "uint16x4_t r", + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_high_u64", + "arguments": [ + "uint32x2_t r", + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int8x8_t" + 
}, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADDHN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADDHN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADDHN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADDHN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADDHN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADDHN" + ] + ] + }, + { + 
"SIMD_ISA": "Neon", + "name": "vaddl_high_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_high_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_high_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_high_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_high_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_high_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": 
[ + "A64" + ], + "instructions": [ + [ + "UADDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SADDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SADDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SADDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UADDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UADDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UADDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlv_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlv_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlv_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlv_u16", + "arguments": [ + "uint16x4_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlv_u32", + "arguments": [ + "uint32x2_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlv_u8", + "arguments": [ + "uint8x8_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlvq_s16", + "arguments": [ + "int16x8_t a" + ], + 
"return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlvq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlvq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlvq_u16", + "arguments": [ + "uint16x8_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlvq_u32", + "arguments": [ + "uint32x4_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlvq_u8", + "arguments": [ + "uint8x16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FADD" + ] + ] + }, + { + "SIMD_ISA": 
"Neon", + "name": "vaddq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_p128", + "arguments": [ + "poly128_t a", + "poly128_t b" + ], + "return_type": { + "value": "poly128_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_p16", + "arguments": [ + "poly16x8_t a", + "poly16x8_t b" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_p64", + "arguments": [ + "poly64x2_t a", + "poly64x2_t b" + ], + "return_type": { + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_p8", + "arguments": [ + "poly8x16_t a", + "poly8x16_t b" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": 
"Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + 
"return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddv_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddv_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddv_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddv_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } 
+ }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddv_u16", + "arguments": [ + "uint16x4_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddv_u32", + "arguments": [ + "uint32x2_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddv_u8", + "arguments": [ + "uint8x8_t a" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDP", + "FADDP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, 
+ "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_s64", + "arguments": [ + "int64x2_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_u16", + "arguments": [ + "uint16x8_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_u32", + "arguments": [ + "uint32x4_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_u64", + "arguments": [ + "uint64x2_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_u8", + "arguments": [ + "uint8x16_t a" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_high_s16", + "arguments": [ + "int32x4_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, 
+ "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDW2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_high_s32", + "arguments": [ + "int64x2_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDW2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_high_s8", + "arguments": [ + "int16x8_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDW2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_high_u16", + "arguments": [ + "uint32x4_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDW2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_high_u32", + "arguments": [ + "uint64x2_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDW2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_high_u8", + "arguments": [ + "uint16x8_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDW2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_s16", + "arguments": [ + "int32x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SADDW" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_s32", + "arguments": [ + "int64x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SADDW" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_s8", + "arguments": [ + "int16x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SADDW" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_u16", + "arguments": [ + "uint32x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UADDW" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_u32", + "arguments": [ + "uint64x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UADDW" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_u8", + "arguments": [ + "uint16x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UADDW" + ] + ] + }, + { + "SIMD_ISA": 
"Neon", + "name": "vaesdq_u8", + "arguments": [ + "uint8x16_t data", + "uint8x16_t key" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Vd.16B" + }, + "key": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "AESD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaeseq_u8", + "arguments": [ + "uint8x16_t data", + "uint8x16_t key" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Vd.16B" + }, + "key": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "AESE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaesimcq_u8", + "arguments": [ + "uint8x16_t data" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "AESIMC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaesmcq_u8", + "arguments": [ + "uint8x16_t data" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "AESMC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamax_f16", + "arguments": [ + "float16x4_t vn", + "float16x4_t vm" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.4H" + }, + "vn": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamax_f32", + "arguments": [ + "float32x2_t vn", + "float32x2_t vm" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.2S" + }, + "vn": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMAX" + ] + ] 
+ }, + { + "SIMD_ISA": "Neon", + "name": "vamaxq_f16", + "arguments": [ + "float16x8_t vn", + "float16x8_t vm" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.8H" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamaxq_f32", + "arguments": [ + "float32x4_t vn", + "float32x4_t vm" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.4S" + }, + "vn": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamaxq_f64", + "arguments": [ + "float64x2_t vn", + "float64x2_t vm" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.2D" + }, + "vn": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamin_f16", + "arguments": [ + "float16x4_t vn", + "float16x4_t vm" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.4H" + }, + "vn": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMIN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamin_f32", + "arguments": [ + "float32x2_t vn", + "float32x2_t vm" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.2S" + }, + "vn": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMIN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaminq_f16", + "arguments": [ + "float16x8_t vn", + "float16x8_t vm" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.8H" + }, + "vn": { + "register": 
"Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMIN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaminq_f32", + "arguments": [ + "float32x4_t vn", + "float32x4_t vm" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.4S" + }, + "vn": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMIN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaminq_f64", + "arguments": [ + "float64x2_t vn", + "float64x2_t vm" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.2D" + }, + "vn": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMIN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vand_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vand_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vand_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vand_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int8x8_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vand_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vand_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vand_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vand_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vandq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + 
"name": "vandq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vandq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vandq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vandq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vandq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vandq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" 
+ } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vandq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbcaxq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b", + "int16x8_t c" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbcaxq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b", + "int32x4_t c" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbcaxq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b", + "int64x2_t c" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbcaxq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b", + "int8x16_t c" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbcaxq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b", + "uint16x8_t c" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbcaxq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbcaxq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b", + "uint64x2_t c" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbcaxq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b", + "uint8x16_t c" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbic_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": 
"Neon", + "name": "vbic_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbic_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbic_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbic_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbic_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbic_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + 
"Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbic_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbicq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbicq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbicq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbicq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbicq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": 
"uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbicq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbicq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbicq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_f16", + "arguments": [ + "uint16x4_t a", + "float16x4_t b", + "float16x4_t c" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_f32", + "arguments": [ + "uint32x2_t a", + "float32x2_t b", + "float32x2_t c" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": 
"Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_f64", + "arguments": [ + "uint64x1_t a", + "float64x1_t b", + "float64x1_t c" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_p16", + "arguments": [ + "uint16x4_t a", + "poly16x4_t b", + "poly16x4_t c" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_p64", + "arguments": [ + "poly64x1_t a", + "poly64x1_t b", + "poly64x1_t c" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_p8", + "arguments": [ + "uint8x8_t a", + "poly8x8_t b", + "poly8x8_t c" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_s16", + "arguments": [ + "uint16x4_t a", + "int16x4_t b", + "int16x4_t c" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + 
"register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_s32", + "arguments": [ + "uint32x2_t a", + "int32x2_t b", + "int32x2_t c" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_s64", + "arguments": [ + "uint64x1_t a", + "int64x1_t b", + "int64x1_t c" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_s8", + "arguments": [ + "uint8x8_t a", + "int8x8_t b", + "int8x8_t c" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b", + "uint16x4_t c" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b", + "uint32x2_t c" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + 
"register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b", + "uint64x1_t c" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b", + "uint8x8_t c" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_f16", + "arguments": [ + "uint16x8_t a", + "float16x8_t b", + "float16x8_t c" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_f32", + "arguments": [ + "uint32x4_t a", + "float32x4_t b", + "float32x4_t c" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_f64", + "arguments": [ + "uint64x2_t a", + "float64x2_t b", + "float64x2_t c" + ], + "return_type": { + "value": "float64x2_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_p16", + "arguments": [ + "uint16x8_t a", + "poly16x8_t b", + "poly16x8_t c" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_p64", + "arguments": [ + "poly64x2_t a", + "poly64x2_t b", + "poly64x2_t c" + ], + "return_type": { + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_p8", + "arguments": [ + "uint8x16_t a", + "poly8x16_t b", + "poly8x16_t c" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_s16", + "arguments": [ + "uint16x8_t a", + "int16x8_t b", + "int16x8_t c" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_s32", + "arguments": [ + "uint32x4_t a", + "int32x4_t b", + "int32x4_t c" + ], + "return_type": { + 
"value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_s64", + "arguments": [ + "uint64x2_t a", + "int64x2_t b", + "int64x2_t c" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_s8", + "arguments": [ + "uint8x16_t a", + "int8x16_t b", + "int8x16_t c" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b", + "uint16x8_t c" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_u64", + "arguments": [ + "uint64x2_t a", + 
"uint64x2_t b", + "uint64x2_t c" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b", + "uint8x16_t c" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcadd_rot270_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcadd_rot270_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcadd_rot90_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcadd_rot90_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "float32x2_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaddq_rot270_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaddq_rot270_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaddq_rot270_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaddq_rot90_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaddq_rot90_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": 
"Neon", + "name": "vcaddq_rot90_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcage_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcage_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcage_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaged_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcageh_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcageq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcageq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcageq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcages_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagt_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagt_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + 
}, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagt_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagtd_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagth_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagtq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagtq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagtq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": 
"uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagts_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcale_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcale_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcale_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaled_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaleh_f16", + "arguments": [ + "float16_t a", + 
"float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaleq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaleq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaleq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcales_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcalt_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": 
"Neon", + "name": "vcalt_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcalt_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaltd_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcalth_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaltq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaltq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" 
+ ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaltq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcalts_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_p64", + "arguments": [ + "poly64x1_t a", + "poly64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + 
"register": "Dm" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_p8", + "arguments": [ + "poly8x8_t a", + "poly8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" 
+ }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqd_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqd_s64", + "arguments": [ + "int64_t a", + "int64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqd_u64", + "arguments": [ + "uint64_t a", + "uint64_t b" + 
], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqh_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_p64", + "arguments": [ + "poly64x2_t a", + "poly64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": 
"vceqq_p8", + "arguments": [ + "poly8x16_t a", + "poly8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": 
[ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqs_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_p64", + "arguments": [ + "poly64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_p8", + "arguments": [ + "poly8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_s64", + "arguments": [ + "int64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_u16", + "arguments": [ + "uint16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_u32", + "arguments": [ + "uint32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_u64", + "arguments": [ + "uint64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_u8", + "arguments": [ + "uint8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzd_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzd_s64", + "arguments": [ + "int64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzd_u64", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzh_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_p64", + "arguments": [ + "poly64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_p8", + "arguments": [ + "poly8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_s64", + "arguments": [ + "int64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_u16", + "arguments": [ + "uint16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_u32", + "arguments": [ + "uint32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_u64", + "arguments": [ + "uint64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_u8", + "arguments": [ + "uint8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzs_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + 
"v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": 
"Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcged_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcged_s64", + "arguments": [ + "int64_t a", + "int64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcged_u64", + "arguments": [ + "uint64_t a", + "uint64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeh_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + 
"a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_u16", + "arguments": [ + "uint16x8_t a", + 
"uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcges_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgez_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgez_f32", + 
"arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgez_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgez_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgez_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgez_s64", + "arguments": [ + "int64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgez_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezd_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezd_s64", + "arguments": [ + 
"int64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezh_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezq_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezq_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezq_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezq_s64", + "arguments": [ 
+ "int64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezs_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_u64", + 
"arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtd_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtd_s64", + "arguments": [ + "int64_t a", + "int64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtd_u64", + "arguments": [ + "uint64_t a", + "uint64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgth_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_f16", + 
"arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], 
+ "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgts_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + 
"register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtz_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtz_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtz_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtz_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtz_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtz_s64", + "arguments": [ + "int64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtz_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzd_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzd_s64", + "arguments": [ + "int64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzh_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzq_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzq_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzq_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzq_s64", + "arguments": [ + "int64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzs_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + 
"SIMD_ISA": "Neon", + "name": "vcle_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + 
"v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcled_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcled_s64", + "arguments": [ + "int64_t a", + "int64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcled_u64", + "arguments": [ + "uint64_t a", + "uint64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": 
"Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleh_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_u8", + 
"arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcles_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclez_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclez_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclez_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclez_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclez_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclez_s64", + "arguments": [ + "int64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclez_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezd_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezd_s64", + "arguments": [ + "int64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezh_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezq_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezq_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezq_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezq_s64", + "arguments": [ + "int64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezs_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcls_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + 
"Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcls_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcls_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcls_u16", + "arguments": [ + "uint16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcls_u32", + "arguments": [ + "uint32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcls_u8", + "arguments": [ + "uint8x8_t a" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclsq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclsq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": 
"int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclsq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclsq_u16", + "arguments": [ + "uint16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclsq_u32", + "arguments": [ + "uint32x4_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclsq_u8", + "arguments": [ + "uint8x16_t a" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + 
"Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + 
"a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltd_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltd_s64", + "arguments": [ + "int64_t a", + "int64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltd_u64", + "arguments": [ + "uint64_t a", + "uint64_t b" + ], + "return_type": { + "value": 
"uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclth_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_s32", + "arguments": [ + 
"int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclts_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltz_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltz_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltz_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltz_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltz_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": 
"uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltz_s64", + "arguments": [ + "int64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltz_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzd_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzd_s64", + "arguments": [ + "int64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzh_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzq_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzq_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" 
+ }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzq_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzq_s64", + "arguments": [ + "int64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzs_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclz_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclz_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclz_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclz_u16", + "arguments": [ + "uint16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclz_u32", + "arguments": [ + "uint32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclz_u8", + "arguments": [ + "uint8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclzq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": 
"vclzq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclzq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclzq_u16", + "arguments": [ + "uint16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclzq_u32", + "arguments": [ + "uint32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclzq_u8", + "arguments": [ + "uint8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x2_t b" + ], 
+ "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_lane_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x4_t b", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_lane_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x2_t b", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_laneq_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x8_t b", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_laneq_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x4_t b", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + 
"register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot180_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot180_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot180_lane_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x4_t b", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot180_lane_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x2_t b", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + 
] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot180_laneq_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x8_t b", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot180_laneq_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x4_t b", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot270_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot270_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot270_lane_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x4_t b", + "const int lane" + ], + "return_type": { + 
"value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot270_lane_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x2_t b", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot270_laneq_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x8_t b", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot270_laneq_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x4_t b", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot90_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + 
"register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot90_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot90_lane_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x4_t b", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot90_lane_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x2_t b", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot90_laneq_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x8_t b", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", 
+ "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot90_laneq_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x4_t b", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_f64", + "arguments": [ + "float64x2_t r", + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_lane_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x4_t b", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + 
"a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_lane_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x2_t b", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_laneq_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x8_t b", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_laneq_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x4_t b", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot180_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, + "r": { + "register": 
"Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot180_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot180_f64", + "arguments": [ + "float64x2_t r", + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot180_lane_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x4_t b", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot180_lane_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x2_t b", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot180_laneq_f16", + "arguments": [ + "float16x8_t 
r", + "float16x8_t a", + "float16x8_t b", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot180_laneq_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x4_t b", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot270_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot270_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot270_f64", + "arguments": [ + "float64x2_t r", + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + 
"r": { + "register": "Vd.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot270_lane_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x4_t b", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot270_lane_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x2_t b", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot270_laneq_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x8_t b", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot270_laneq_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x4_t b", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ 
+ "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot90_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot90_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot90_f64", + "arguments": [ + "float64x2_t r", + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot90_lane_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x4_t b", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot90_lane_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x2_t b", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot90_laneq_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x8_t b", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot90_laneq_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x4_t b", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcnt_p8", + "arguments": [ + "poly8x8_t a" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CNT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcnt_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CNT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcnt_u8", + "arguments": [ + "uint8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CNT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcntq_p8", + "arguments": [ + "poly8x16_t a" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CNT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcntq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CNT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcntq_u8", + "arguments": [ + "uint8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CNT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_f16", + "arguments": [ + "float16x4_t low", + "float16x4_t high" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.4H" + }, + "low": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_f32", + "arguments": [ + "float32x2_t low", + "float32x2_t high" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.2S" + }, + "low": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_f64", + "arguments": [ + "float64x1_t low", + "float64x1_t high" + ], + "return_type": { + "value": "float64x2_t" + }, + 
"Arguments_Preparation": { + "high": { + "register": "Vm.1D" + }, + "low": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_p16", + "arguments": [ + "poly16x4_t low", + "poly16x4_t high" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.4H" + }, + "low": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_p64", + "arguments": [ + "poly64x1_t low", + "poly64x1_t high" + ], + "return_type": { + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.1D" + }, + "low": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_p8", + "arguments": [ + "poly8x8_t low", + "poly8x8_t high" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.8B" + }, + "low": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_s16", + "arguments": [ + "int16x4_t low", + "int16x4_t high" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.4H" + }, + "low": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_s32", + "arguments": [ + "int32x2_t low", + "int32x2_t high" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.2S" + }, + "low": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + 
"A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_s64", + "arguments": [ + "int64x1_t low", + "int64x1_t high" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.1D" + }, + "low": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_s8", + "arguments": [ + "int8x8_t low", + "int8x8_t high" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.8B" + }, + "low": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_u16", + "arguments": [ + "uint16x4_t low", + "uint16x4_t high" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.4H" + }, + "low": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_u32", + "arguments": [ + "uint32x2_t low", + "uint32x2_t high" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.2S" + }, + "low": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_u64", + "arguments": [ + "uint64x1_t low", + "uint64x1_t high" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.1D" + }, + "low": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_u8", + 
"arguments": [ + "uint8x8_t low", + "uint8x8_t high" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.8B" + }, + "low": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_f32", + "arguments": [ + "float32x2_t a", + "const int lane1", + "float32x2_t b", + "const int lane2" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_f64", + "arguments": [ + "float64x1_t a", + "const int lane1", + "float64x1_t b", + "const int lane2" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "UNUSED" + }, + "b": { + "register": "Vn.1D" + }, + "lane1": { + "minimum": 0, + "maximum": 0 + }, + "lane2": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_p16", + "arguments": [ + "poly16x4_t a", + "const int lane1", + "poly16x4_t b", + "const int lane2" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_p64", + "arguments": [ + "poly64x1_t a", + "const int lane1", + "poly64x1_t b", + "const int lane2" + ], + "return_type": { + "value": "poly64x1_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "UNUSED" + }, + "b": { + "register": "Vn.1D" + }, + "lane1": { + "minimum": 0, + "maximum": 0 + }, + "lane2": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_p8", + "arguments": [ + "poly8x8_t a", + "const int lane1", + "poly8x8_t b", + "const int lane2" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_s16", + "arguments": [ + "int16x4_t a", + "const int lane1", + "int16x4_t b", + "const int lane2" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_s32", + "arguments": [ + "int32x2_t a", + "const int lane1", + "int32x2_t b", + "const int lane2" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_s64", + "arguments": [ + "int64x1_t a", + "const int lane1", + "int64x1_t b", + "const int lane2" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "UNUSED" + }, + "b": 
{ + "register": "Vn.1D" + }, + "lane1": { + "minimum": 0, + "maximum": 0 + }, + "lane2": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_s8", + "arguments": [ + "int8x8_t a", + "const int lane1", + "int8x8_t b", + "const int lane2" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_u16", + "arguments": [ + "uint16x4_t a", + "const int lane1", + "uint16x4_t b", + "const int lane2" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_u32", + "arguments": [ + "uint32x2_t a", + "const int lane1", + "uint32x2_t b", + "const int lane2" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_u64", + "arguments": [ + "uint64x1_t a", + "const int lane1", + "uint64x1_t b", + "const int lane2" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "UNUSED" + }, + "b": { + "register": "Vn.1D" + }, + "lane1": { + "minimum": 0, + "maximum": 0 
+ }, + "lane2": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_u8", + "arguments": [ + "uint8x8_t a", + "const int lane1", + "uint8x8_t b", + "const int lane2" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_f32", + "arguments": [ + "float32x2_t a", + "const int lane1", + "float32x4_t b", + "const int lane2" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.4S" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_f64", + "arguments": [ + "float64x1_t a", + "const int lane1", + "float64x2_t b", + "const int lane2" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "UNUSED" + }, + "b": { + "register": "Vn.2D" + }, + "lane1": { + "minimum": 0, + "maximum": 0 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_p16", + "arguments": [ + "poly16x4_t a", + "const int lane1", + "poly16x8_t b", + "const int lane2" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.8H" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_p64", + "arguments": [ + "poly64x1_t a", + "const int lane1", + "poly64x2_t b", + "const int lane2" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "UNUSED" + }, + "b": { + "register": "Vn.2D" + }, + "lane1": { + "minimum": 0, + "maximum": 0 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_p8", + "arguments": [ + "poly8x8_t a", + "const int lane1", + "poly8x16_t b", + "const int lane2" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.16B" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_s16", + "arguments": [ + "int16x4_t a", + "const int lane1", + "int16x8_t b", + "const int lane2" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.8H" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_s32", + "arguments": [ + "int32x2_t a", + "const int lane1", + "int32x4_t b", + "const int lane2" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.4S" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_s64", + "arguments": [ + "int64x1_t a", + "const int lane1", + "int64x2_t b", + "const int lane2" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "UNUSED" + }, + "b": { + "register": "Vn.2D" + }, + "lane1": { + "minimum": 0, + "maximum": 0 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_s8", + "arguments": [ + "int8x8_t a", + "const int lane1", + "int8x16_t b", + "const int lane2" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.16B" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_u16", + "arguments": [ + "uint16x4_t a", + "const int lane1", + "uint16x8_t b", + "const int lane2" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.8H" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_u32", + "arguments": [ + "uint32x2_t a", + "const int lane1", + "uint32x4_t b", + "const int lane2" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.4S" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": 
"vcopy_laneq_u64", + "arguments": [ + "uint64x1_t a", + "const int lane1", + "uint64x2_t b", + "const int lane2" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "UNUSED" + }, + "b": { + "register": "Vn.2D" + }, + "lane1": { + "minimum": 0, + "maximum": 0 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_u8", + "arguments": [ + "uint8x8_t a", + "const int lane1", + "uint8x16_t b", + "const int lane2" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.16B" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_f32", + "arguments": [ + "float32x4_t a", + "const int lane1", + "float32x2_t b", + "const int lane2" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.2S" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_f64", + "arguments": [ + "float64x2_t a", + "const int lane1", + "float64x1_t b", + "const int lane2" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.1D" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_p16", + "arguments": [ + "poly16x8_t a", + 
"const int lane1", + "poly16x4_t b", + "const int lane2" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.4H" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_p64", + "arguments": [ + "poly64x2_t a", + "const int lane1", + "poly64x1_t b", + "const int lane2" + ], + "return_type": { + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.1D" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_p8", + "arguments": [ + "poly8x16_t a", + "const int lane1", + "poly8x8_t b", + "const int lane2" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.8B" + }, + "lane1": { + "minimum": 0, + "maximum": 15 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_s16", + "arguments": [ + "int16x8_t a", + "const int lane1", + "int16x4_t b", + "const int lane2" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.4H" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_s32", + "arguments": [ + "int32x4_t a", + "const int lane1", + "int32x2_t b", + "const int lane2" + 
], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.2S" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_s64", + "arguments": [ + "int64x2_t a", + "const int lane1", + "int64x1_t b", + "const int lane2" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.1D" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_s8", + "arguments": [ + "int8x16_t a", + "const int lane1", + "int8x8_t b", + "const int lane2" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.8B" + }, + "lane1": { + "minimum": 0, + "maximum": 15 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_u16", + "arguments": [ + "uint16x8_t a", + "const int lane1", + "uint16x4_t b", + "const int lane2" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.4H" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_u32", + "arguments": [ + "uint32x4_t a", + "const int lane1", + "uint32x2_t b", + "const int lane2" + ], + "return_type": { + "value": "uint32x4_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.2S" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_u64", + "arguments": [ + "uint64x2_t a", + "const int lane1", + "uint64x1_t b", + "const int lane2" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.1D" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_u8", + "arguments": [ + "uint8x16_t a", + "const int lane1", + "uint8x8_t b", + "const int lane2" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.8B" + }, + "lane1": { + "minimum": 0, + "maximum": 15 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_f32", + "arguments": [ + "float32x4_t a", + "const int lane1", + "float32x4_t b", + "const int lane2" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_f64", + "arguments": [ + "float64x2_t a", + "const int lane1", + "float64x2_t b", + "const int lane2" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": 
"Vd.2D" + }, + "b": { + "register": "Vn.2D" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_p16", + "arguments": [ + "poly16x8_t a", + "const int lane1", + "poly16x8_t b", + "const int lane2" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_p64", + "arguments": [ + "poly64x2_t a", + "const int lane1", + "poly64x2_t b", + "const int lane2" + ], + "return_type": { + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2D" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_p8", + "arguments": [ + "poly8x16_t a", + "const int lane1", + "poly8x16_t b", + "const int lane2" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "lane1": { + "minimum": 0, + "maximum": 15 + }, + "lane2": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_s16", + "arguments": [ + "int16x8_t a", + "const int lane1", + "int16x8_t b", + "const int lane2" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + }, + 
"lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_s32", + "arguments": [ + "int32x4_t a", + "const int lane1", + "int32x4_t b", + "const int lane2" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_s64", + "arguments": [ + "int64x2_t a", + "const int lane1", + "int64x2_t b", + "const int lane2" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2D" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_s8", + "arguments": [ + "int8x16_t a", + "const int lane1", + "int8x16_t b", + "const int lane2" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "lane1": { + "minimum": 0, + "maximum": 15 + }, + "lane2": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_u16", + "arguments": [ + "uint16x8_t a", + "const int lane1", + "uint16x8_t b", + "const int lane2" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + 
"minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_u32", + "arguments": [ + "uint32x4_t a", + "const int lane1", + "uint32x4_t b", + "const int lane2" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_u64", + "arguments": [ + "uint64x2_t a", + "const int lane1", + "uint64x2_t b", + "const int lane2" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2D" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_u8", + "arguments": [ + "uint8x16_t a", + "const int lane1", + "uint8x16_t b", + "const int lane2" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "lane1": { + "minimum": 0, + "maximum": 15 + }, + "lane2": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_f16", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_f32", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": 
"float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_f64", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_p16", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_p64", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_p8", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_s16", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_s32", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_s64", + 
"arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_s8", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_u16", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_u32", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_u64", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_u8", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f16_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + 
"instructions": [ + [ + "FCVTN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f16_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f16_u16", + "arguments": [ + "uint16x4_t a" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f32_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f32_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f32_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f32_u32", + "arguments": [ + "uint32x2_t a" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f64_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "float64x2_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f64_s64", + "arguments": [ + "int64x1_t a" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f64_u64", + "arguments": [ + "uint64x1_t a" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_high_f16_f32", + "arguments": [ + "float16x4_t r", + "float32x4_t a" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_high_f32_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_high_f32_f64", + "arguments": [ + "float32x2_t r", + "float64x2_t a" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_high_f64_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"FCVTL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_f16_s16", + "arguments": [ + "int16x4_t a", + "const int n" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_f16_u16", + "arguments": [ + "uint16x4_t a", + "const int n" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_f32_s32", + "arguments": [ + "int32x2_t a", + "const int n" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_f32_u32", + "arguments": [ + "uint32x2_t a", + "const int n" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_f64_s64", + "arguments": [ + "int64x1_t a", + "const int n" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_f64_u64", + "arguments": [ + "uint64x1_t a", + "const int n" + ], + "return_type": { + "value": "float64x1_t" + 
}, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_s16_f16", + "arguments": [ + "float16x4_t a", + "const int n" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_s32_f32", + "arguments": [ + "float32x2_t a", + "const int n" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_s64_f64", + "arguments": [ + "float64x1_t a", + "const int n" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_u16_f16", + "arguments": [ + "float16x4_t a", + "const int n" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_u32_f32", + "arguments": [ + "float32x2_t a", + "const int n" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + 
"FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_u64_f64", + "arguments": [ + "float64x1_t a", + "const int n" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_s16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_s32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_s64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_u16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_u32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_u64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": 
"uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvta_s16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvta_s32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvta_s64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvta_u16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvta_u32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvta_u64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtad_s64_f64", 
+ "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtad_u64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtah_s16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtah_s32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtah_s64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtah_u16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtah_u32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtah_u64_f16", + 
"arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtaq_s16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtaq_s32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtaq_s64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtaq_u16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtaq_u32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtaq_u64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtas_s32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtas_u32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_f64_s64", + "arguments": [ + "int64_t a" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_f64_u64", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_n_f64_s64", + "arguments": [ + "int64_t a", + "const int n" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_n_f64_u64", + "arguments": [ + "uint64_t a", + "const int n" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_n_s64_f64", + "arguments": [ + "float64_t a", + "const int n" + ], + "return_type": { + "value": 
"int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_n_u64_f64", + "arguments": [ + "float64_t a", + "const int n" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_s64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_u64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_f16_s16", + "arguments": [ + "int16_t a" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_f16_s32", + "arguments": [ + "int32_t a" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_f16_s64", + "arguments": [ + "int64_t a" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + 
"SIMD_ISA": "Neon", + "name": "vcvth_f16_u16", + "arguments": [ + "uint16_t a" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_f16_u32", + "arguments": [ + "uint32_t a" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_f16_u64", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_f16_s16", + "arguments": [ + "int16_t a", + "const int n" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_f16_s32", + "arguments": [ + "int32_t a", + "const int n" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_f16_s64", + "arguments": [ + "int64_t a", + "const int n" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_f16_u16", + "arguments": [ + "uint16_t a", + 
"const int n" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_f16_u32", + "arguments": [ + "uint32_t a", + "const int n" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_f16_u64", + "arguments": [ + "uint64_t a", + "const int n" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_s16_f16", + "arguments": [ + "float16_t a", + "const int n" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_s32_f16", + "arguments": [ + "float16_t a", + "const int n" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_s64_f16", + "arguments": [ + "float16_t a", + "const int n" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_u16_f16", + "arguments": [ + "float16_t a", + "const int n" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_u32_f16", + "arguments": [ + "float16_t a", + "const int n" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_u64_f16", + "arguments": [ + "float16_t a", + "const int n" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_s16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_s32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_s64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_u16_f16", + 
"arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_u32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_u64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtm_s16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtm_s32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtm_s64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtm_u16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + 
"SIMD_ISA": "Neon", + "name": "vcvtm_u32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtm_u64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmd_s64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmd_u64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmh_s16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmh_s32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmh_s64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + 
] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmh_u16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmh_u32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmh_u64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmq_s16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmq_s32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmq_s64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmq_u16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + 
"A32", + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmq_u32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmq_u64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtms_s32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtms_u32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtn_s16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtn_s32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtn_s64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "int64x1_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtn_u16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtn_u32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtn_u64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnd_s64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnd_u64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnh_s16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnh_s32_f16", + "arguments": [ + "float16_t a" + ], + 
"return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnh_s64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnh_u16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnh_u32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnh_u64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnq_s16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnq_s32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnq_s64_f64", + 
"arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnq_u16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnq_u32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnq_u64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtns_s32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtns_u32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtp_s16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] 
+ }, + { + "SIMD_ISA": "Neon", + "name": "vcvtp_s32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtp_s64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtp_u16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtp_u32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtp_u64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpd_s64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpd_u64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtph_s16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtph_s32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtph_s64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtph_u16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtph_u32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtph_u64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpq_s16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + 
"Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpq_s32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpq_s64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpq_u16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpq_u32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpq_u64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtps_s32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtps_u32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + 
}, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_f16_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_f16_u16", + "arguments": [ + "uint16x8_t a" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_f32_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_f32_u32", + "arguments": [ + "uint32x4_t a" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_f64_s64", + "arguments": [ + "int64x2_t a" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_f64_u64", + "arguments": [ + "uint64x2_t a" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": 
"vcvtq_n_f16_s16", + "arguments": [ + "int16x8_t a", + "const int n" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_f16_u16", + "arguments": [ + "uint16x8_t a", + "const int n" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_f32_s32", + "arguments": [ + "int32x4_t a", + "const int n" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_f32_u32", + "arguments": [ + "uint32x4_t a", + "const int n" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_f64_s64", + "arguments": [ + "int64x2_t a", + "const int n" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_f64_u64", + "arguments": [ + "uint64x2_t a", + "const int n" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + 
"register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_s16_f16", + "arguments": [ + "float16x8_t a", + "const int n" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_s32_f32", + "arguments": [ + "float32x4_t a", + "const int n" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_s64_f64", + "arguments": [ + "float64x2_t a", + "const int n" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_u16_f16", + "arguments": [ + "float16x8_t a", + "const int n" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_u32_f32", + "arguments": [ + "float32x4_t a", + "const int n" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + 
"SIMD_ISA": "Neon", + "name": "vcvtq_n_u64_f64", + "arguments": [ + "float64x2_t a", + "const int n" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_s16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_s32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_s64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_u16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_u32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_u64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + 
}, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_f32_s32", + "arguments": [ + "int32_t a" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_f32_u32", + "arguments": [ + "uint32_t a" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_n_f32_s32", + "arguments": [ + "int32_t a", + "const int n" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_n_f32_u32", + "arguments": [ + "uint32_t a", + "const int n" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_n_s32_f32", + "arguments": [ + "float32_t a", + "const int n" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_n_u32_f32", + "arguments": [ + "float32_t a", + "const int n" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "n": { + 
"minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_s32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_u32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtx_f32_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTXN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtx_high_f32_f64", + "arguments": [ + "float32x2_t r", + "float64x2_t a" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTXN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtxd_f32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTXN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdiv_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + 
"name": "vdiv_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdiv_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdivh_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdivq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdivq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdivq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" 
+ ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdot_lane_s32", + "arguments": [ + "int32x2_t r", + "int8x8_t a", + "int8x8_t b", + "const int lane" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdot_lane_u32", + "arguments": [ + "uint32x2_t r", + "uint8x8_t a", + "uint8x8_t b", + "const int lane" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdot_laneq_s32", + "arguments": [ + "int32x2_t r", + "int8x8_t a", + "int8x16_t b", + "const int lane" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdot_laneq_u32", + "arguments": [ + "uint32x2_t r", + "uint8x8_t a", + "uint8x16_t b", + "const int lane" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdot_s32", + "arguments": [ + "int32x2_t r", + "int8x8_t a", + "int8x8_t b" + ], + 
"return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdot_u32", + "arguments": [ + "uint32x2_t r", + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdotq_lane_s32", + "arguments": [ + "int32x4_t r", + "int8x16_t a", + "int8x8_t b", + "const int lane" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdotq_lane_u32", + "arguments": [ + "uint32x4_t r", + "uint8x16_t a", + "uint8x8_t b", + "const int lane" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdotq_laneq_s32", + "arguments": [ + "int32x4_t r", + "int8x16_t a", + "int8x16_t b", + "const int lane" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.4S" + } 
+ }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdotq_laneq_u32", + "arguments": [ + "uint32x4_t r", + "uint8x16_t a", + "uint8x16_t b", + "const int lane" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdotq_s32", + "arguments": [ + "int32x4_t r", + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdotq_u32", + "arguments": [ + "uint32x4_t r", + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_f16", + "arguments": [ + "float16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_f32", + "arguments": [ + "float32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + 
"register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_f64", + "arguments": [ + "float64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_p16", + "arguments": [ + "poly16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_p64", + "arguments": [ + "poly64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_p8", + "arguments": [ + "poly8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_s16", + "arguments": [ + "int16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": 
"Neon", + "name": "vdup_lane_s32", + "arguments": [ + "int32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_s64", + "arguments": [ + "int64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_s8", + "arguments": [ + "int8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_u16", + "arguments": [ + "uint16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_u32", + "arguments": [ + "uint32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_u64", + "arguments": [ + "uint64x1_t vec", + "const int lane" + ], + "return_type": { + 
"value": "uint64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_u8", + "arguments": [ + "uint8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_f16", + "arguments": [ + "float16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_f32", + "arguments": [ + "float32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_f64", + "arguments": [ + "float64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_p16", + "arguments": [ + "poly16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_p64", + "arguments": [ + "poly64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_p8", + "arguments": [ + "poly8x16_t vec", + "const int lane" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_s16", + "arguments": [ + "int16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_s32", + "arguments": [ + "int32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_s64", + "arguments": [ + "int64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_s8", + "arguments": [ + "int8x16_t vec", + "const int lane" + ], + 
"return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_u16", + "arguments": [ + "uint16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_u32", + "arguments": [ + "uint32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_u64", + "arguments": [ + "uint64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_u8", + "arguments": [ + "uint8x16_t vec", + "const int lane" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_f16", + "arguments": [ + "float16_t value" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { 
+ "SIMD_ISA": "Neon", + "name": "vdup_n_f32", + "arguments": [ + "float32_t value" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_f64", + "arguments": [ + "float64_t value" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_p16", + "arguments": [ + "poly16_t value" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_p64", + "arguments": [ + "poly64_t value" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_p8", + "arguments": [ + "poly8_t value" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_s16", + "arguments": [ + "int16_t value" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_s32", + "arguments": [ + "int32_t value" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + 
} + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_s64", + "arguments": [ + "int64_t value" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_s8", + "arguments": [ + "int8_t value" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_u16", + "arguments": [ + "uint16_t value" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_u32", + "arguments": [ + "uint32_t value" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_u64", + "arguments": [ + "uint64_t value" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_u8", + "arguments": [ + "uint8_t value" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupb_lane_p8", + "arguments": [ + 
"poly8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "poly8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupb_lane_s8", + "arguments": [ + "int8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupb_lane_u8", + "arguments": [ + "uint8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupb_laneq_p8", + "arguments": [ + "poly8x16_t vec", + "const int lane" + ], + "return_type": { + "value": "poly8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupb_laneq_s8", + "arguments": [ + "int8x16_t vec", + "const int lane" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupb_laneq_u8", + "arguments": [ + "uint8x16_t vec", + "const int lane" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupd_lane_f64", + "arguments": [ + "float64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupd_lane_s64", + "arguments": [ + "int64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupd_lane_u64", + "arguments": [ + "uint64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupd_laneq_f64", + "arguments": [ + "float64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupd_laneq_s64", + "arguments": [ + "int64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupd_laneq_u64", + "arguments": [ + "uint64x2_t vec", + "const int lane" + ], + 
"return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vduph_lane_f16", + "arguments": [ + "float16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vduph_lane_p16", + "arguments": [ + "poly16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "poly16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vduph_lane_s16", + "arguments": [ + "int16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vduph_lane_u16", + "arguments": [ + "uint16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vduph_laneq_f16", + "arguments": [ + "float16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vduph_laneq_p16", + "arguments": [ + "poly16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "poly16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vduph_laneq_s16", + "arguments": [ + "int16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vduph_laneq_u16", + "arguments": [ + "uint16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_f16", + "arguments": [ + "float16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_f32", + "arguments": [ + "float32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_f64", + "arguments": [ + "float64x1_t vec", + "const int lane" + ], + 
"return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_p16", + "arguments": [ + "poly16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_p64", + "arguments": [ + "poly64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_p8", + "arguments": [ + "poly8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_s16", + "arguments": [ + "int16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_s32", + "arguments": [ + "int32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + 
"register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_s64", + "arguments": [ + "int64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_s8", + "arguments": [ + "int8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_u16", + "arguments": [ + "uint16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_u32", + "arguments": [ + "uint32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_u64", + "arguments": [ + "uint64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] 
+ ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_u8", + "arguments": [ + "uint8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_f16", + "arguments": [ + "float16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_f32", + "arguments": [ + "float32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_f64", + "arguments": [ + "float64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_p16", + "arguments": [ + "poly16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_p64", + "arguments": [ + "poly64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "poly64x2_t" + 
}, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_p8", + "arguments": [ + "poly8x16_t vec", + "const int lane" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_s16", + "arguments": [ + "int16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_s32", + "arguments": [ + "int32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_s64", + "arguments": [ + "int64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_s8", + "arguments": [ + "int8x16_t vec", + "const int lane" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] 
+ }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_u16", + "arguments": [ + "uint16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_u32", + "arguments": [ + "uint32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_u64", + "arguments": [ + "uint64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_u8", + "arguments": [ + "uint8x16_t vec", + "const int lane" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_f16", + "arguments": [ + "float16_t value" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_f32", + "arguments": [ + "float32_t value" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + 
"v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_f64", + "arguments": [ + "float64_t value" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_p16", + "arguments": [ + "poly16_t value" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_p64", + "arguments": [ + "poly64_t value" + ], + "return_type": { + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_p8", + "arguments": [ + "poly8_t value" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_s16", + "arguments": [ + "int16_t value" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_s32", + "arguments": [ + "int32_t value" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_s64", + "arguments": [ + "int64_t value" + ], + "return_type": { + 
"value": "int64x2_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_s8", + "arguments": [ + "int8_t value" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_u16", + "arguments": [ + "uint16_t value" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_u32", + "arguments": [ + "uint32_t value" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_u64", + "arguments": [ + "uint64_t value" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_u8", + "arguments": [ + "uint8_t value" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdups_lane_f32", + "arguments": [ + "float32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdups_lane_s32", + "arguments": [ + "int32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdups_lane_u32", + "arguments": [ + "uint32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdups_laneq_f32", + "arguments": [ + "float32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdups_laneq_s32", + "arguments": [ + "int32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdups_laneq_u32", + "arguments": [ + "uint32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor3q_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b", + "int16x8_t c" + ], + 
"return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor3q_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b", + "int32x4_t c" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor3q_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b", + "int64x2_t c" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor3q_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b", + "int8x16_t c" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor3q_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b", + "uint16x8_t c" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor3q_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" + ], + "return_type": { + "value": 
"uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor3q_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b", + "uint64x2_t c" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor3q_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b", + "uint8x16_t c" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + 
"Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veorq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int16x8_t" 
+ }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veorq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veorq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veorq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veorq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veorq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + 
"SIMD_ISA": "Neon", + "name": "veorq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veorq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b", + "const int n" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b", + "const int n" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b", + "const int n" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_p16", + 
"arguments": [ + "poly16x4_t a", + "poly16x4_t b", + "const int n" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_p64", + "arguments": [ + "poly64x1_t a", + "poly64x1_t b", + "const int n" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_p8", + "arguments": [ + "poly8x8_t a", + "poly8x8_t b", + "const int n" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b", + "const int n" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b", + "const int n" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" 
+ ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b", + "const int n" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b", + "const int n" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b", + "const int n" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { "minimum": 0, "maximum": 3 } @@ -30097,7 +233346,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.4H " + "register": "Vd.4H" }, "b": { "register": "Vn.4H" @@ -30680,7 +233929,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.8H " + "register": "Vd.8H" }, "b": { "register": "Vn.8H" @@ -30846,8 +234095,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vd.2H" + }, "r": { "register": "Vd.2S" } @@ -30875,8 +234128,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -30908,8 +234165,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + 
}, "lane": { "minimum": 0, "maximum": 3 @@ -30941,8 +234202,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -30974,8 +234239,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31006,8 +234275,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vd.2H" + }, "r": { "register": "Vd.2S" } @@ -31034,8 +234307,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vd.4H" + }, "r": { "register": "Vd.4S" } @@ -31063,8 +234340,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31096,8 +234377,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31129,8 +234414,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31162,8 +234451,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31194,8 +234487,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vd.4H" + }, "r": { "register": "Vd.4S" } @@ -31222,8 +234519,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": 
{}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vd.2H" + }, "r": { "register": "Vd.2S" } @@ -31251,8 +234552,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31284,8 +234589,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31317,8 +234626,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31350,8 +234663,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31382,8 +234699,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vd.2H" + }, "r": { "register": "Vd.2S" } @@ -31410,8 +234731,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vd.4H" + }, "r": { "register": "Vd.4S" } @@ -31439,8 +234764,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31472,8 +234801,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31505,8 +234838,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31538,8 +234875,12 @@ 
"value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31570,8 +234911,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vd.4H" + }, "r": { "register": "Vd.4S" } @@ -31911,7 +235256,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.4H " + "register": "Vd.4H" }, "b": { "register": "Vn.4H" @@ -32492,7 +235837,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.8H " + "register": "Vd.8H" }, "b": { "register": "Vn.8H" @@ -34868,230 +238213,6 @@ ] ] }, - { - "SIMD_ISA": "Neon", - "name": "vldap1_lane_u64", - "arguments": [ - "uint64_t const * ptr", - "uint64x1_t src", - "const int lane" - ], - "return_type": { - "value": "uint64x1_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.1D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vldap1_lane_s64", - "arguments": [ - "int64_t const * ptr", - "int64x1_t src", - "const int lane" - ], - "return_type": { - "value": "int64x1_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.1D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vldap1q_lane_u64", - "arguments": [ - "uint64_t const * ptr", - "uint64x2_t src", - "const int lane" - ], - "return_type": { - "value": "uint64x2_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - 
"name": "vldap1q_lane_s64", - "arguments": [ - "int64_t const * ptr", - "int64x2_t src", - "const int lane" - ], - "return_type": { - "value": "int64x2_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vldap1_lane_p64", - "arguments": [ - "poly64_t const * ptr", - "poly64x1_t src", - "const int lane" - ], - "return_type": { - "value": "poly64x1_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.1D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vldap1q_lane_p64", - "arguments": [ - "poly64_t const * ptr", - "poly64x2_t src", - "const int lane" - ], - "return_type": { - "value": "poly64x2_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vldap1q_lane_f64", - "arguments": [ - "float64_t const * ptr", - "float64x2_t src", - "const int lane" - ], - "return_type": { - "value": "float64x2_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, { "SIMD_ISA": "Neon", "name": "vld1_dup_f16", @@ -39947,7 +243068,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { "register": "Vt2.4H" } }, @@ -39981,7 +243105,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + 
"src.val[1]": { "register": "Vt2.2S" } }, @@ -40015,7 +243142,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { "register": "Vt2.1D" } }, @@ -40047,7 +243177,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { "register": "Vt2.4H" } }, @@ -40081,7 +243214,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { "register": "Vt2.1D" } }, @@ -40113,7 +243249,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { "register": "Vt2.8B" } }, @@ -40147,7 +243286,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { "register": "Vt2.4H" } }, @@ -40181,7 +243323,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { "register": "Vt2.2S" } }, @@ -40215,7 +243360,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { "register": "Vt2.1D" } }, @@ -40247,7 +243395,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { "register": "Vt2.8B" } }, @@ -40281,7 +243432,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { "register": "Vt2.4H" } }, @@ -40315,7 +243469,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { "register": "Vt2.2S" } }, @@ -40349,7 +243506,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { "register": "Vt2.1D" } }, @@ -40381,7 +243541,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { "register": "Vt2.8B" } }, @@ -41104,7 +244267,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": 
"Vt.8H" + }, + "src.val[1]": { "register": "Vt2.8H" } }, @@ -41138,7 +244304,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { "register": "Vt2.4S" } }, @@ -41172,7 +244341,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { "register": "Vt2.2D" } }, @@ -41204,7 +244376,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { "register": "Vt2.8H" } }, @@ -41238,7 +244413,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { "register": "Vt2.2D" } }, @@ -41270,7 +244448,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { "register": "Vt2.16B" } }, @@ -41302,7 +244483,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { "register": "Vt2.8H" } }, @@ -41336,7 +244520,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { "register": "Vt2.4S" } }, @@ -41370,7 +244557,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { "register": "Vt2.2D" } }, @@ -41402,7 +244592,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { "register": "Vt2.16B" } }, @@ -41434,7 +244627,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { "register": "Vt2.8H" } }, @@ -41468,7 +244664,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { "register": "Vt2.4S" } }, @@ -41502,7 +244701,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { "register": "Vt2.2D" } }, @@ -41534,7 +244736,10 @@ "ptr": { "register": "Xn" }, - "src": { + 
"src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { "register": "Vt2.16B" } }, @@ -42255,7 +245460,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { "register": "Vt3.4H" } }, @@ -42289,7 +245500,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { "register": "Vt3.2S" } }, @@ -42323,7 +245540,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { "register": "Vt3.1D" } }, @@ -42355,7 +245578,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { "register": "Vt3.4H" } }, @@ -42389,7 +245618,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { "register": "Vt3.1D" } }, @@ -42421,7 +245656,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { "register": "Vt3.8B" } }, @@ -42455,7 +245696,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { "register": "Vt3.4H" } }, @@ -42489,7 +245736,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { "register": "Vt3.2S" } }, @@ -42523,7 +245776,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { "register": "Vt3.1D" } }, @@ -42555,7 +245814,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": 
"Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { "register": "Vt3.8B" } }, @@ -42589,7 +245854,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { "register": "Vt3.4H" } }, @@ -42623,7 +245894,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { "register": "Vt3.2S" } }, @@ -42657,7 +245934,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { "register": "Vt3.1D" } }, @@ -42689,7 +245972,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { "register": "Vt3.8B" } }, @@ -43412,7 +246701,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { "register": "Vt3.8H" } }, @@ -43446,7 +246741,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { "register": "Vt3.4S" } }, @@ -43480,7 +246781,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { "register": "Vt3.2D" } }, @@ -43512,7 +246819,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { "register": "Vt3.8H" } }, @@ -43546,7 +246859,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { "register": "Vt3.2D" } }, @@ -43578,7 +246897,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + 
"register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + "src.val[2]": { "register": "Vt3.16B" } }, @@ -43610,7 +246935,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { "register": "Vt3.8H" } }, @@ -43644,7 +246975,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { "register": "Vt3.4S" } }, @@ -43678,7 +247015,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { "register": "Vt3.2D" } }, @@ -43710,7 +247053,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + "src.val[2]": { "register": "Vt3.16B" } }, @@ -43742,7 +247091,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { "register": "Vt3.8H" } }, @@ -43776,7 +247131,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { "register": "Vt3.4S" } }, @@ -43810,7 +247171,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { "register": "Vt3.2D" } }, @@ -43842,7 +247209,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + "src.val[2]": { "register": "Vt3.16B" } }, @@ -44563,7 +247936,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { + "register": "Vt3.4H" + }, + "src.val[3]": { "register": "Vt4.4H" } }, @@ -44597,7 
+247979,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { + "register": "Vt3.2S" + }, + "src.val[3]": { "register": "Vt4.2S" } }, @@ -44631,7 +248022,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { + "register": "Vt3.1D" + }, + "src.val[3]": { "register": "Vt4.1D" } }, @@ -44663,7 +248063,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { + "register": "Vt3.4H" + }, + "src.val[3]": { "register": "Vt4.4H" } }, @@ -44697,7 +248106,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { + "register": "Vt3.1D" + }, + "src.val[3]": { "register": "Vt4.1D" } }, @@ -44729,7 +248147,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { + "register": "Vt3.8B" + }, + "src.val[3]": { "register": "Vt4.8B" } }, @@ -44763,7 +248190,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { + "register": "Vt3.4H" + }, + "src.val[3]": { "register": "Vt4.4H" } }, @@ -44797,7 +248233,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { + "register": "Vt3.2S" + }, + "src.val[3]": { "register": "Vt4.2S" } }, @@ -44831,7 +248276,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { + "register": "Vt3.1D" + }, + "src.val[3]": { "register": "Vt4.1D" } }, @@ -44863,7 +248317,16 @@ "ptr": { 
"register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { + "register": "Vt3.8B" + }, + "src.val[3]": { "register": "Vt4.8B" } }, @@ -44897,7 +248360,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { + "register": "Vt3.4H" + }, + "src.val[3]": { "register": "Vt4.4H" } }, @@ -44931,7 +248403,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { + "register": "Vt3.2S" + }, + "src.val[3]": { "register": "Vt4.2S" } }, @@ -44962,10 +248443,19 @@ "minimum": 0, "maximum": 0 }, - "ptr": { - "register": "Xn" + "ptr": { + "register": "Xn" + }, + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" }, - "src": { + "src.val[2]": { + "register": "Vt3.1D" + }, + "src.val[3]": { "register": "Vt4.1D" } }, @@ -44997,7 +248487,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { + "register": "Vt3.8B" + }, + "src.val[3]": { "register": "Vt4.8B" } }, @@ -45720,7 +249219,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { + "register": "Vt3.8H" + }, + "src.val[3]": { "register": "Vt4.8H" } }, @@ -45754,7 +249262,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { + "register": "Vt3.4S" + }, + "src.val[3]": { "register": "Vt4.4S" } }, @@ -45788,7 +249305,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { + "register": "Vt3.2D" + }, + "src.val[3]": { "register": 
"Vt4.2D" } }, @@ -45820,7 +249346,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { + "register": "Vt3.8H" + }, + "src.val[3]": { "register": "Vt4.8H" } }, @@ -45854,7 +249389,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { + "register": "Vt3.2D" + }, + "src.val[3]": { "register": "Vt4.2D" } }, @@ -45886,7 +249430,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + "src.val[2]": { + "register": "Vt3.16B" + }, + "src.val[3]": { "register": "Vt4.16B" } }, @@ -45918,7 +249471,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { + "register": "Vt3.8H" + }, + "src.val[3]": { "register": "Vt4.8H" } }, @@ -45952,7 +249514,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { + "register": "Vt3.4S" + }, + "src.val[3]": { "register": "Vt4.4S" } }, @@ -45986,7 +249557,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { + "register": "Vt3.2D" + }, + "src.val[3]": { "register": "Vt4.2D" } }, @@ -46018,7 +249598,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + "src.val[2]": { + "register": "Vt3.16B" + }, + "src.val[3]": { "register": "Vt4.16B" } }, @@ -46050,7 +249639,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { + "register": "Vt3.8H" + }, + "src.val[3]": { "register": "Vt4.8H" } }, 
@@ -46084,7 +249682,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { + "register": "Vt3.4S" + }, + "src.val[3]": { "register": "Vt4.4S" } }, @@ -46118,7 +249725,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { + "register": "Vt3.2D" + }, + "src.val[3]": { "register": "Vt4.2D" } }, @@ -46150,7 +249766,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + "src.val[2]": { + "register": "Vt3.16B" + }, + "src.val[3]": { "register": "Vt4.16B" } }, @@ -46434,915 +250059,89 @@ }, { "SIMD_ISA": "Neon", - "name": "vldrq_p128", - "arguments": [ - "poly128_t const * ptr" - ], - "return_type": { - "value": "poly128_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "A32", - "A64" - ], - "instructions": [ - [ - "LDR" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmax_f16", - "arguments": [ - "float16x4_t a", - "float16x4_t b" - ], - "return_type": { - "value": "float16x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" - } - }, - "Architectures": [ - "A32", - "A64" - ], - "instructions": [ - [ - "FMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmax_f32", - "arguments": [ - "float32x2_t a", - "float32x2_t b" - ], - "return_type": { - "value": "float32x2_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "FMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmax_f64", - "arguments": [ - "float64x1_t a", - "float64x1_t b" - ], - "return_type": { - "value": "float64x1_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Dn" - }, 
- "b": { - "register": "Dm" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmax_s16", - "arguments": [ - "int16x4_t a", - "int16x4_t b" - ], - "return_type": { - "value": "int16x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "SMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmax_s32", - "arguments": [ - "int32x2_t a", - "int32x2_t b" - ], - "return_type": { - "value": "int32x2_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "SMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmax_s8", - "arguments": [ - "int8x8_t a", - "int8x8_t b" - ], - "return_type": { - "value": "int8x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "SMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmax_u16", - "arguments": [ - "uint16x4_t a", - "uint16x4_t b" - ], - "return_type": { - "value": "uint16x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "UMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmax_u32", - "arguments": [ - "uint32x2_t a", - "uint32x2_t b" - ], - "return_type": { - "value": "uint32x2_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "UMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmax_u8", - "arguments": [ - "uint8x8_t a", - "uint8x8_t b" - ], - "return_type": { 
- "value": "uint8x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "UMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxh_f16", - "arguments": [ - "float16_t a", - "float16_t b" - ], - "return_type": { - "value": "float16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Hn" - }, - "b": { - "register": "Hm" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxnm_f16", - "arguments": [ - "float16x4_t a", - "float16x4_t b" - ], - "return_type": { - "value": "float16x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" - } - }, - "Architectures": [ - "A32", - "A64" - ], - "instructions": [ - [ - "FMAXNM" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxnm_f32", - "arguments": [ - "float32x2_t a", - "float32x2_t b" - ], - "return_type": { - "value": "float32x2_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" - } - }, - "Architectures": [ - "A32", - "A64" - ], - "instructions": [ - [ - "FMAXNM" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxnm_f64", + "name": "vldap1_lane_f64", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "float64_t const * ptr", + "float64x1_t src", + "const int lane" ], "return_type": { "value": "float64x1_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" - }, - "b": { - "register": "Dm" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FMAXNM" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxnmh_f16", - "arguments": [ - "float16_t a", - "float16_t b" - ], - "return_type": { - "value": "float16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Hn" - }, - "b": { - "register": "Hm" - } - }, - "Architectures": [ - "A32", - "A64" - ], - 
"instructions": [ - [ - "FMAXNM" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxnmq_f16", - "arguments": [ - "float16x8_t a", - "float16x8_t b" - ], - "return_type": { - "value": "float16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" - } - }, - "Architectures": [ - "A32", - "A64" - ], - "instructions": [ - [ - "FMAXNM" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxnmq_f32", - "arguments": [ - "float32x4_t a", - "float32x4_t b" - ], - "return_type": { - "value": "float32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" - } - }, - "Architectures": [ - "A32", - "A64" - ], - "instructions": [ - [ - "FMAXNM" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxnmq_f64", - "arguments": [ - "float64x2_t a", - "float64x2_t b" - ], - "return_type": { - "value": "float64x2_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FMAXNM" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxnmv_f16", - "arguments": [ - "float16x4_t a" - ], - "return_type": { - "value": "float16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FMAXNMP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxnmv_f32", - "arguments": [ - "float32x2_t a" - ], - "return_type": { - "value": "float32_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.2S" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FMAXNMP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxnmvq_f16", - "arguments": [ - "float16x8_t a" - ], - "return_type": { - "value": "float16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FMAXNMP" - ] - ] - }, - { - "SIMD_ISA": "Neon", 
- "name": "vmaxnmvq_f32", - "arguments": [ - "float32x4_t a" - ], - "return_type": { - "value": "float32_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FMAXNMV" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxnmvq_f64", - "arguments": [ - "float64x2_t a" - ], - "return_type": { - "value": "float64_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FMAXNMP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxq_f16", - "arguments": [ - "float16x8_t a", - "float16x8_t b" - ], - "return_type": { - "value": "float16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" - } - }, - "Architectures": [ - "A32", - "A64" - ], - "instructions": [ - [ - "FMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxq_f32", - "arguments": [ - "float32x4_t a", - "float32x4_t b" - ], - "return_type": { - "value": "float32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "FMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxq_f64", - "arguments": [ - "float64x2_t a", - "float64x2_t b" - ], - "return_type": { - "value": "float64x2_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxq_s16", - "arguments": [ - "int16x8_t a", - "int16x8_t b" - ], - "return_type": { - "value": "int16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "lane": { + "minimum": 0, + "maximum": 0 }, - "b": { - "register": "Vm.8H" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "SMAX" - ] - ] - 
}, - { - "SIMD_ISA": "Neon", - "name": "vmaxq_s32", - "arguments": [ - "int32x4_t a", - "int32x4_t b" - ], - "return_type": { - "value": "int32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.4S" + "src": { + "register": "Vt.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMAX" + "LDAP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxq_s8", + "name": "vldap1_lane_p64", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "poly64_t const * ptr", + "poly64x1_t src", + "const int lane" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 0 }, - "b": { - "register": "Vm.16B" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "SMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxq_u16", - "arguments": [ - "uint16x8_t a", - "uint16x8_t b" - ], - "return_type": { - "value": "uint16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8H" + "src": { + "register": "Vt.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMAX" + "LDAP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxq_u32", + "name": "vldap1_lane_s64", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int64_t const * ptr", + "int64x1_t src", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "int64x1_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 0 }, - "b": { - "register": "Vm.4S" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "UMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxq_u8", - "arguments": [ - "uint8x16_t a", - "uint8x16_t b" - ], - "return_type": { - "value": "uint8x16_t" - }, - "Arguments_Preparation": { - "a": 
{ - "register": "Vn.16B" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.16B" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "UMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxv_f16", - "arguments": [ - "float16x4_t a" - ], - "return_type": { - "value": "float16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FMAXP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmaxv_f32", - "arguments": [ - "float32x2_t a" - ], - "return_type": { - "value": "float32_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "src": { + "register": "Vt.1D" } }, "Architectures": [ @@ -47350,22 +250149,31 @@ ], "instructions": [ [ - "FMAXP" + "LDAP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxv_s16", + "name": "vldap1_lane_u64", "arguments": [ - "int16x4_t a" + "uint64_t const * ptr", + "uint64x1_t src", + "const int lane" ], "return_type": { - "value": "int16_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.1D" } }, "Architectures": [ @@ -47373,22 +250181,31 @@ ], "instructions": [ [ - "SMAXV" + "LDAP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxv_s32", + "name": "vldap1q_lane_f64", "arguments": [ - "int32x2_t a" + "float64_t const * ptr", + "float64x2_t src", + "const int lane" ], "return_type": { - "value": "int32_t" + "value": "float64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.2D" } }, "Architectures": [ @@ -47396,22 +250213,31 @@ ], "instructions": [ [ - "SMAXP" + "LDAP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxv_s8", + "name": "vldap1q_lane_p64", "arguments": [ - "int8x8_t a" + "poly64_t const * ptr", + "poly64x2_t src", + 
"const int lane" ], "return_type": { - "value": "int8_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.2D" } }, "Architectures": [ @@ -47419,22 +250245,31 @@ ], "instructions": [ [ - "SMAXV" + "LDAP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxv_u16", + "name": "vldap1q_lane_s64", "arguments": [ - "uint16x4_t a" + "int64_t const * ptr", + "int64x2_t src", + "const int lane" ], "return_type": { - "value": "uint16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.2D" } }, "Architectures": [ @@ -47442,22 +250277,31 @@ ], "instructions": [ [ - "UMAXV" + "LDAP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxv_u32", + "name": "vldap1q_lane_u64", "arguments": [ - "uint32x2_t a" + "uint64_t const * ptr", + "uint64x2_t src", + "const int lane" ], "return_type": { - "value": "uint32_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.2D" } }, "Architectures": [ @@ -47465,44 +250309,54 @@ ], "instructions": [ [ - "UMAXP" + "LDAP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxv_u8", + "name": "vldrq_p128", "arguments": [ - "uint8x8_t a" + "poly128_t const * ptr" ], "return_type": { - "value": "uint8_t" + "value": "poly128_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "ptr": { + "register": "Xn" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "UMAXV" + "LDR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxvq_f16", + "name": "vluti2_lane_f16", "arguments": [ - "float16x8_t a" + "float16x4_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float16_t" + "value": "float16x8_t" }, 
"Arguments_Preparation": { - "a": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { "register": "Vn.8H" } }, @@ -47511,22 +250365,31 @@ ], "instructions": [ [ - "FMAXP" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxvq_f32", + "name": "vluti2_lane_p16", "arguments": [ - "float32x4_t a" + "poly16x4_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float32_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ @@ -47534,22 +250397,31 @@ ], "instructions": [ [ - "FMAXV" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxvq_f64", + "name": "vluti2_lane_p8", "arguments": [ - "float64x2_t a" + "poly8x8_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float64_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ @@ -47557,21 +250429,30 @@ ], "instructions": [ [ - "FMAXP" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxvq_s16", + "name": "vluti2_lane_s16", "arguments": [ - "int16x8_t a" + "int16x4_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "int16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { "register": "Vn.8H" } }, @@ -47580,22 +250461,31 @@ ], "instructions": [ [ - "SMAXV" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxvq_s32", + "name": "vluti2_lane_s8", "arguments": [ - "int32x4_t a" + "int8x8_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "int32_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + 
"index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ @@ -47603,22 +250493,31 @@ ], "instructions": [ [ - "SMAXV" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxvq_s8", + "name": "vluti2_lane_u16", "arguments": [ - "int8x16_t a" + "uint16x4_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "int8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ @@ -47626,22 +250525,31 @@ ], "instructions": [ [ - "SMAXV" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxvq_u16", + "name": "vluti2_lane_u8", "arguments": [ - "uint16x8_t a" + "uint8x8_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "uint16_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ @@ -47649,22 +250557,31 @@ ], "instructions": [ [ - "UMAXV" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxvq_u32", + "name": "vluti2_laneq_f16", "arguments": [ - "uint32x4_t a" + "float16x4_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "uint32_t" + "value": "float16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "index": { + "minimum": 0, + "maximum": 7 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ @@ -47672,22 +250589,31 @@ ], "instructions": [ [ - "UMAXV" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxvq_u8", + "name": "vluti2_laneq_p16", "arguments": [ - "uint8x16_t a" + "poly16x4_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "uint8_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { - 
"a": { - "register": "Vn.16B" + "index": { + "minimum": 0, + "maximum": 7 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ @@ -47695,83 +250621,95 @@ ], "instructions": [ [ - "UMAXV" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmin_f16", + "name": "vluti2_laneq_p8", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "poly8x8_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float16x4_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.4H" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmin_f32", + "name": "vluti2_laneq_s16", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "int16x4_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "index": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.2S" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmin_f64", + "name": "vluti2_laneq_s8", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "int8x8_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float64x1_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Dm" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ @@ -47779,200 +250717,223 @@ ], "instructions": [ [ - "FMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmin_s16", + "name": "vluti2_laneq_u16", "arguments": [ - "int16x4_t a", 
- "int16x4_t b" + "uint16x4_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "int16x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "index": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.4H" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmin_s32", + "name": "vluti2_laneq_u8", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "uint8x8_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "int32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.2S" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmin_s8", + "name": "vluti2q_lane_f16", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "float16x8_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "int8x8_t" + "value": "float16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.8B" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmin_u16", + "name": "vluti2q_lane_p16", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "poly16x8_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "uint16x4_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.4H" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, 
"Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmin_u32", + "name": "vluti2q_lane_p8", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "poly8x16_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "uint32x2_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "index": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.2S" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmin_u8", + "name": "vluti2q_lane_s16", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "int16x8_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "uint8x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.8B" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminh_f16", + "name": "vluti2q_lane_s8", "arguments": [ - "float16_t a", - "float16_t b" + "int8x16_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "index": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Hm" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ @@ -47980,82 +250941,95 @@ ], "instructions": [ [ - "FMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnm_f16", + "name": "vluti2q_lane_u16", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "uint16x8_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float16x4_t" + "value": "uint16x8_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.4H" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMINNM" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnm_f32", + "name": "vluti2q_lane_u8", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "uint8x16_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "index": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.2S" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMINNM" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnm_f64", + "name": "vluti2q_laneq_f16", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "float16x8_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float64x1_t" + "value": "float16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "index": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Dm" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ @@ -48063,110 +251037,127 @@ ], "instructions": [ [ - "FMINNM" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmh_f16", + "name": "vluti2q_laneq_p16", "arguments": [ - "float16_t a", - "float16_t b" + "poly16x8_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float16_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "index": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Hm" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMINNM" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmq_f16", + "name": 
"vluti2q_laneq_p8", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "poly8x16_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.8H" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMINNM" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmq_f32", + "name": "vluti2q_laneq_s16", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "int16x8_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "index": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.4S" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMINNM" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmq_f64", + "name": "vluti2q_laneq_s8", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "int8x16_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float64x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.2D" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ @@ -48174,22 +251165,31 @@ ], "instructions": [ [ - "FMINNM" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmv_f16", + "name": "vluti2q_laneq_u16", "arguments": [ - "float16x4_t a" + "uint16x8_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "index": { + "minimum": 0, + "maximum": 7 + }, + "vm": { + "register": "Vm" + }, + "vn": { + 
"register": "Vn.8H" } }, "Architectures": [ @@ -48197,22 +251197,31 @@ ], "instructions": [ [ - "FMINNMP" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmv_f32", + "name": "vluti2q_laneq_u8", "arguments": [ - "float32x2_t a" + "uint8x16_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float32_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ @@ -48220,22 +251229,34 @@ ], "instructions": [ [ - "FMINNMP" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmvq_f16", + "name": "vluti4q_lane_f16_x2", "arguments": [ - "float16x8_t a" + "float16x8x2_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float16_t" + "value": "float16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" } }, "Architectures": [ @@ -48243,22 +251264,34 @@ ], "instructions": [ [ - "FMINNMP" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmvq_f32", + "name": "vluti4q_lane_p16_x2", "arguments": [ - "float32x4_t a" + "poly16x8x2_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float32_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" } }, "Architectures": [ @@ -48266,22 +251299,31 @@ ], "instructions": [ [ - "FMINNMV" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmvq_f64", + "name": "vluti4q_lane_p8", "arguments": [ - "float64x2_t a" + "poly8x16_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float64_t" + 
"value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "index": { + "minimum": 0, + "maximum": 0 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ @@ -48289,83 +251331,101 @@ ], "instructions": [ [ - "FMINNMP" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_f16", + "name": "vluti4q_lane_s16_x2", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "int16x8x2_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float16x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "index": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.8H" + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_f32", + "name": "vluti4q_lane_s8", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "int8x16_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "index": { + "minimum": 0, + "maximum": 0 }, - "b": { - "register": "Vm.4S" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_f64", + "name": "vluti4q_lane_u16_x2", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "uint16x8x2_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float64x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "index": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.2D" + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" } }, "Architectures": [ @@ 
-48373,196 +251433,235 @@ ], "instructions": [ [ - "FMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_s16", + "name": "vluti4q_lane_u8", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "uint8x16_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "int16x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "index": { + "minimum": 0, + "maximum": 0 }, - "b": { - "register": "Vm.8H" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_s32", + "name": "vluti4q_laneq_f16_x2", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "float16x8x2_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "int32x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.4S" + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_s8", + "name": "vluti4q_laneq_p16_x2", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "poly16x8x2_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "int8x16_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.16B" + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_u16", + "name": "vluti4q_laneq_p8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "poly8x16_t vn", + "uint8x16_t 
vm", + "const int index" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "index": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.8H" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_u32", + "name": "vluti4q_laneq_s16_x2", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int16x8x2_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.4S" + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_u8", + "name": "vluti4q_laneq_s8", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "int8x16_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "uint8x16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "index": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.16B" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminv_f16", + "name": "vluti4q_laneq_u16_x2", "arguments": [ - "float16x4_t a" + "uint16x8x2_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": 
{ + "register": "Vn2.8H" } }, "Architectures": [ @@ -48570,22 +251669,31 @@ ], "instructions": [ [ - "FMINP" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminv_f32", + "name": "vluti4q_laneq_u8", "arguments": [ - "float32x2_t a" + "uint8x16_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float32_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ @@ -48593,68 +251701,83 @@ ], "instructions": [ [ - "FMINP" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminv_s16", + "name": "vmax_f16", "arguments": [ - "int16x4_t a" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "int16_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "SMINV" + "FMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminv_s32", + "name": "vmax_f32", "arguments": [ - "int32x2_t a" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int32_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMINP" + "FMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminv_s8", + "name": "vmax_f64", "arguments": [ - "int8x8_t a" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "int8_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Dn" + }, + "b": { + "register": "Dm" } }, "Architectures": [ @@ -48662,160 +251785,200 @@ ], "instructions": [ [ - "SMINV" + "FMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminv_u16", + "name": "vmax_s16", "arguments": [ - "uint16x4_t a" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "uint16_t" + 
"value": "int16x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMINV" + "SMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminv_u32", + "name": "vmax_s32", "arguments": [ - "uint32x2_t a" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "uint32_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMINP" + "SMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminv_u8", + "name": "vmax_s8", "arguments": [ - "uint8x8_t a" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "uint8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMINV" + "SMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_f16", + "name": "vmax_u16", "arguments": [ - "float16x8_t a" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "float16_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINP" + "UMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_f32", + "name": "vmax_u32", "arguments": [ - "float32x4_t a" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "float32_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINV" + "UMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_f64", + "name": "vmax_u8", "arguments": [ - "float64x2_t a" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "float64_t" + "value": 
"uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINP" + "UMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_s16", + "name": "vmaxh_f16", "arguments": [ - "int16x8_t a" + "float16_t a", + "float16_t b" ], "return_type": { - "value": "int16_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Hn" + }, + "b": { + "register": "Hm" } }, "Architectures": [ @@ -48823,68 +251986,82 @@ ], "instructions": [ [ - "SMINV" + "FMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_s32", + "name": "vmaxnm_f16", "arguments": [ - "int32x4_t a" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "int32_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "SMINV" + "FMAXNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_s8", + "name": "vmaxnm_f32", "arguments": [ - "int8x16_t a" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "SMINV" + "FMAXNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_u16", + "name": "vmaxnm_f64", "arguments": [ - "uint16x8_t a" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "uint16_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Dn" + }, + "b": { + "register": "Dm" } }, "Architectures": [ @@ -48892,109 +252069,110 @@ ], "instructions": [ [ - "UMINV" + "FMAXNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_u32", + "name": "vmaxnmh_f16", "arguments": [ - "uint32x4_t a" + "float16_t a", + 
"float16_t b" ], "return_type": { - "value": "uint32_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Hn" + }, + "b": { + "register": "Hm" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "UMINV" + "FMAXNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_u8", + "name": "vmaxnmq_f16", "arguments": [ - "uint8x16_t a" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "uint8_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "UMINV" + "FMAXNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_f32", + "name": "vmaxnmq_f32", "arguments": [ - "float32x2_t a", - "float32x2_t b", - "float32x2_t c" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "float32x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "N/A" + "register": "Vn.4S" }, "b": { - "register": "N/A" - }, - "c": { - "register": "N/A" + "register": "Vm.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "RESULT[I]" + "FMAXNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_f64", + "name": "vmaxnmq_f64", "arguments": [ - "float64x1_t a", - "float64x1_t b", - "float64x1_t c" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "float64x1_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "N/A" + "register": "Vn.2D" }, "b": { - "register": "N/A" - }, - "c": { - "register": "N/A" + "register": "Vm.2D" } }, "Architectures": [ @@ -49002,321 +252180,198 @@ ], "instructions": [ [ - "RESULT[I]" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmla_lane_f32", - "arguments": [ - "float32x2_t a", - "float32x2_t b", - "float32x2_t v", - "const int lane" - ], - "return_type": { - "value": "float32x2_t" - }, - "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, 
- "maximum": 1 - }, - "v": {} - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "RESULT[I]" + "FMAXNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_lane_s16", + "name": "vmaxnmv_f16", "arguments": [ - "int16x4_t a", - "int16x4_t b", - "int16x4_t v", - "const int lane" + "float16x4_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_lane_s32", + "name": "vmaxnmv_f32", "arguments": [ - "int32x2_t a", - "int32x2_t b", - "int32x2_t v", - "const int lane" + "float32x2_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_lane_u16", + "name": "vmaxnmvq_f16", "arguments": [ - "uint16x4_t a", - "uint16x4_t b", - "uint16x4_t v", - "const int lane" + "float16x8_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_lane_u32", + "name": "vmaxnmvq_f32", "arguments": [ - "uint32x2_t a", - "uint32x2_t b", - "uint32x2_t v", - "const int lane" + "float32x4_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "float32_t" }, "Arguments_Preparation": { 
"a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vn.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMAXNMV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_laneq_f32", + "name": "vmaxnmvq_f64", "arguments": [ - "float32x2_t a", - "float32x2_t b", - "float32x4_t v", - "const int lane" + "float64x2_t a" ], "return_type": { - "value": "float32x2_t" + "value": "float64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": {} + "a": { + "register": "Vn.2D" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "RESULT[I]" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_laneq_s16", + "name": "vmaxq_f16", "arguments": [ - "int16x4_t a", - "int16x4_t b", - "int16x8_t v", - "const int lane" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "int16x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.8H" }, "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { "register": "Vm.8H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "MLA" + "FMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_laneq_s32", + "name": "vmaxq_f32", "arguments": [ - "int32x2_t a", - "int32x2_t b", - "int32x4_t v", - "const int lane" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int32x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.4S" }, "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MLA" + "FMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_laneq_u16", + "name": "vmaxq_f64", "arguments": [ - "uint16x4_t a", - "uint16x4_t b", - "uint16x8_t v", - "const 
int lane" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.2D" }, "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vm.2D" } }, "Architectures": [ @@ -49324,66 +252379,55 @@ ], "instructions": [ [ - "MLA" + "FMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_laneq_u32", + "name": "vmaxq_s16", "arguments": [ - "uint32x2_t a", - "uint32x2_t b", - "uint32x4_t v", - "const int lane" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.8H" }, "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MLA" + "SMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_n_f32", + "name": "vmaxq_s32", "arguments": [ - "float32x2_t a", - "float32x2_t b", - "float32_t c" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "N/A" + "register": "Vn.4S" }, "b": { - "register": "N/A" - }, - "c": { - "register": "N/A" + "register": "Vm.4S" } }, "Architectures": [ @@ -49393,30 +252437,26 @@ ], "instructions": [ [ - "RESULT[I]" + "SMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_n_s16", + "name": "vmaxq_s8", "arguments": [ - "int16x4_t a", - "int16x4_t b", - "int16_t c" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.16B" }, "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vm.16B" } }, "Architectures": [ @@ -49426,30 +252466,26 @@ ], "instructions": [ [ - 
"MLA" + "SMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_n_s32", + "name": "vmaxq_u16", "arguments": [ - "int32x2_t a", - "int32x2_t b", - "int32_t c" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int32x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.8H" }, "b": { - "register": "Vn.2S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vm.8H" } }, "Architectures": [ @@ -49459,30 +252495,26 @@ ], "instructions": [ [ - "MLA" + "UMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_n_u16", + "name": "vmaxq_u32", "arguments": [ - "uint16x4_t a", - "uint16x4_t b", - "uint16_t c" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.4S" }, "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vm.4S" } }, "Architectures": [ @@ -49492,30 +252524,26 @@ ], "instructions": [ [ - "MLA" + "UMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_n_u32", + "name": "vmaxq_u8", "arguments": [ - "uint32x2_t a", - "uint32x2_t b", - "uint32_t c" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.16B" }, "b": { - "register": "Vn.2S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vm.16B" } }, "Architectures": [ @@ -49525,233 +252553,160 @@ ], "instructions": [ [ - "MLA" + "UMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_s16", + "name": "vmaxv_f16", "arguments": [ - "int16x4_t a", - "int16x4_t b", - "int16x4_t c" + "float16x4_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { "register": "Vn.4H" - }, - "c": { - "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMAXP" ] ] }, { 
"SIMD_ISA": "Neon", - "name": "vmla_s32", + "name": "vmaxv_f32", "arguments": [ - "int32x2_t a", - "int32x2_t b", - "int32x2_t c" + "float32x2_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { "register": "Vn.2S" - }, - "c": { - "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_s8", + "name": "vmaxv_s16", "arguments": [ - "int8x8_t a", - "int8x8_t b", - "int8x8_t c" + "int16x4_t a" ], "return_type": { - "value": "int8x8_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.8B" - }, - "c": { - "register": "Vm.8B" + "register": "Vn.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "SMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_u16", + "name": "vmaxv_s32", "arguments": [ - "uint16x4_t a", - "uint16x4_t b", - "uint16x4_t c" + "int32x2_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.4H" + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "SMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_u32", + "name": "vmaxv_s8", "arguments": [ - "uint32x2_t a", - "uint32x2_t b", - "uint32x2_t c" + "int8x8_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" - }, - "c": { - "register": "Vm.2S" + "register": "Vn.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "SMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_u8", + "name": "vmaxv_u16", "arguments": [ - "uint8x8_t a", - "uint8x8_t b", - "uint8x8_t c" + "uint16x4_t a" ], "return_type": { - "value": 
"uint8x8_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.8B" - }, - "c": { - "register": "Vm.8B" + "register": "Vn.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "UMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_lane_s16", + "name": "vmaxv_u32", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x4_t v", - "const int lane" + "uint32x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Vm.2S" } }, "Architectures": [ @@ -49759,35 +252714,22 @@ ], "instructions": [ [ - "SMLAL2" + "UMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_lane_s32", + "name": "vmaxv_u8", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x2_t v", - "const int lane" + "uint8x8_t a" ], "return_type": { - "value": "int64x2_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vn.8B" } }, "Architectures": [ @@ -49795,35 +252737,22 @@ ], "instructions": [ [ - "SMLAL2" + "UMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_lane_u16", + "name": "vmaxvq_f16", "arguments": [ - "uint32x4_t a", - "uint16x8_t b", - "uint16x4_t v", - "const int lane" + "float16x8_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" } }, "Architectures": [ @@ -49831,35 +252760,22 @@ ], "instructions": [ [ - "UMLAL2" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_lane_u32", + "name": "vmaxvq_f32", "arguments": [ 
- "uint64x2_t a", - "uint32x4_t b", - "uint32x2_t v", - "const int lane" + "float32x4_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" } }, "Architectures": [ @@ -49867,35 +252783,22 @@ ], "instructions": [ [ - "UMLAL2" + "FMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_laneq_s16", + "name": "vmaxvq_f64", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x8_t v", - "const int lane" + "float64x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vn.2D" } }, "Architectures": [ @@ -49903,35 +252806,22 @@ ], "instructions": [ [ - "SMLAL2" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_laneq_s32", + "name": "vmaxvq_s16", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x4_t v", - "const int lane" + "int16x8_t a" ], "return_type": { - "value": "int64x2_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vn.8H" } }, "Architectures": [ @@ -49939,35 +252829,22 @@ ], "instructions": [ [ - "SMLAL2" + "SMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_laneq_u16", + "name": "vmaxvq_s32", "arguments": [ - "uint32x4_t a", - "uint16x8_t b", - "uint16x8_t v", - "const int lane" + "int32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vn.4S" } }, 
"Architectures": [ @@ -49975,35 +252852,22 @@ ], "instructions": [ [ - "UMLAL2" + "SMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_laneq_u32", + "name": "vmaxvq_s8", "arguments": [ - "uint64x2_t a", - "uint32x4_t b", - "uint32x4_t v", - "const int lane" + "int8x16_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vn.16B" } }, "Architectures": [ @@ -50011,30 +252875,22 @@ ], "instructions": [ [ - "UMLAL2" + "SMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_n_s16", + "name": "vmaxvq_u16", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16_t c" + "uint16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.8H" - }, - "c": { - "register": "Vm.H[0]" } }, "Architectures": [ @@ -50042,30 +252898,22 @@ ], "instructions": [ [ - "SMLAL2" + "UMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_n_s32", + "name": "vmaxvq_u32", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32_t c" + "uint32x4_t a" ], "return_type": { - "value": "int64x2_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": "Vn.4S" - }, - "c": { - "register": "Vm.S[0]" } }, "Architectures": [ @@ -50073,30 +252921,22 @@ ], "instructions": [ [ - "SMLAL2" + "UMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_n_u16", + "name": "vmaxvq_u8", "arguments": [ - "uint32x4_t a", - "uint16x8_t b", - "uint16_t c" + "uint8x16_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vn.16B" } }, "Architectures": [ @@ -50104,92 +252944,83 @@ ], 
"instructions": [ [ - "UMLAL2" + "UMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_n_u32", + "name": "vmin_f16", "arguments": [ - "uint64x2_t a", - "uint32x4_t b", - "uint32_t c" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.4H" }, "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vm.4H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "UMLAL2" + "FMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_s16", + "name": "vmin_f32", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x8_t c" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.2S" }, "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.8H" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMLAL2" + "FMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_s32", + "name": "vmin_f64", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x4_t c" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "int64x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Dn" }, "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.4S" + "register": "Dm" } }, "Architectures": [ @@ -50197,159 +253028,142 @@ ], "instructions": [ [ - "SMLAL2" + "FMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_s8", + "name": "vmin_s16", "arguments": [ - "int16x8_t a", - "int8x16_t b", - "int8x16_t c" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vn.4H" }, "b": { - "register": "Vn.16B" - }, - "c": { - "register": "Vm.16B" + "register": "Vm.4H" } }, 
"Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMLAL2" + "SMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_u16", + "name": "vmin_s32", "arguments": [ - "uint32x4_t a", - "uint16x8_t b", - "uint16x8_t c" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.2S" }, "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.8H" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMLAL2" + "SMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_u32", + "name": "vmin_s8", "arguments": [ - "uint64x2_t a", - "uint32x4_t b", - "uint32x4_t c" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8B" }, "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.4S" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMLAL2" + "SMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_u8", + "name": "vmin_u16", "arguments": [ - "uint16x8_t a", - "uint8x16_t b", - "uint8x16_t c" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vn.4H" }, "b": { - "register": "Vn.16B" - }, - "c": { - "register": "Vm.16B" + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMLAL2" + "UMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_lane_s16", + "name": "vmin_u32", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x4_t v", - "const int lane" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.2S" }, "b": { - "register": "Vn.4H" - }, - 
"lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Vm.2S" } }, "Architectures": [ @@ -50359,35 +253173,26 @@ ], "instructions": [ [ - "SMLAL" + "UMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_lane_s32", + "name": "vmin_u8", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x2_t v", - "const int lane" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8B" }, "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vm.8B" } }, "Architectures": [ @@ -50397,147 +253202,109 @@ ], "instructions": [ [ - "SMLAL" + "UMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_lane_u16", + "name": "vminh_f16", "arguments": [ - "uint32x4_t a", - "uint16x4_t b", - "uint16x4_t v", - "const int lane" + "float16_t a", + "float16_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Hn" }, "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Hm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMLAL" + "FMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_lane_u32", + "name": "vminnm_f16", "arguments": [ - "uint64x2_t a", - "uint32x2_t b", - "uint32x2_t v", - "const int lane" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.4H" }, "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vm.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "UMLAL" + "FMINNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_laneq_s16", + "name": 
"vminnm_f32", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x8_t v", - "const int lane" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.2S" }, "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vm.2S" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "SMLAL" + "FMINNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_laneq_s32", + "name": "vminnm_f64", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x4_t v", - "const int lane" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "int64x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Dn" }, "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Dm" } }, "Architectures": [ @@ -50545,366 +253312,280 @@ ], "instructions": [ [ - "SMLAL" + "FMINNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_laneq_u16", + "name": "vminnmh_f16", "arguments": [ - "uint32x4_t a", - "uint16x4_t b", - "uint16x8_t v", - "const int lane" + "float16_t a", + "float16_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Hn" }, "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Hm" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "UMLAL" + "FMINNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_laneq_u32", + "name": "vminnmq_f16", "arguments": [ - "uint64x2_t a", - "uint32x2_t b", - "uint32x4_t v", - "const int lane" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + 
"register": "Vn.8H" }, "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "UMLAL" + "FMINNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_n_s16", + "name": "vminnmq_f32", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16_t c" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.4S" }, "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vm.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SMLAL" + "FMINNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_n_s32", + "name": "vminnmq_f64", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32_t c" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int64x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.2D" }, "b": { - "register": "Vn.2S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMLAL" + "FMINNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_n_u16", + "name": "vminnmv_f16", "arguments": [ - "uint32x4_t a", - "uint16x4_t b", - "uint16_t c" + "float16x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4H" - }, - "c": { - "register": "Vm.H[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMLAL" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_n_u32", + "name": "vminnmv_f32", "arguments": [ - "uint64x2_t a", - "uint32x2_t b", - "uint32_t c" + "float32x2_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": 
"Vd.2D" - }, - "b": { "register": "Vn.2S" - }, - "c": { - "register": "Vm.S[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMLAL" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_s16", + "name": "vminnmvq_f16", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x4_t c" + "float16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.4H" + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMLAL" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_s32", + "name": "vminnmvq_f32", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x2_t c" + "float32x4_t a" ], "return_type": { - "value": "int64x2_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.2S" - }, - "c": { - "register": "Vm.2S" + "register": "Vn.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMLAL" + "FMINNMV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_s8", + "name": "vminnmvq_f64", "arguments": [ - "int16x8_t a", - "int8x8_t b", - "int8x8_t c" + "float64x2_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8B" - }, - "c": { - "register": "Vm.8B" + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMLAL" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_u16", + "name": "vminq_f16", "arguments": [ - "uint32x4_t a", - "uint16x4_t b", - "uint16x4_t c" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.8H" }, "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.4H" + "register": 
"Vm.8H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "UMLAL" + "FMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_u32", + "name": "vminq_f32", "arguments": [ - "uint64x2_t a", - "uint32x2_t b", - "uint32x2_t c" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.4S" }, "b": { - "register": "Vn.2S" - }, - "c": { - "register": "Vm.2S" + "register": "Vm.4S" } }, "Architectures": [ @@ -50914,63 +253595,53 @@ ], "instructions": [ [ - "UMLAL" + "FMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_u8", + "name": "vminq_f64", "arguments": [ - "uint16x8_t a", - "uint8x8_t b", - "uint8x8_t c" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vn.2D" }, "b": { - "register": "Vn.8B" - }, - "c": { - "register": "Vm.8B" + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMLAL" + "FMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_f32", + "name": "vminq_s16", "arguments": [ - "float32x4_t a", - "float32x4_t b", - "float32x4_t c" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "float32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "N/A" + "register": "Vn.8H" }, "b": { - "register": "N/A" - }, - "c": { - "register": "N/A" + "register": "Vm.8H" } }, "Architectures": [ @@ -50980,61 +253651,56 @@ ], "instructions": [ [ - "RESULT[I]" + "SMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_f64", + "name": "vminq_s32", "arguments": [ - "float64x2_t a", - "float64x2_t b", - "float64x2_t c" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "N/A" + "register": "Vn.4S" }, "b": { - "register": "N/A" - }, - "c": { - 
"register": "N/A" + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "RESULT[I]" + "SMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_lane_f32", + "name": "vminq_s8", "arguments": [ - "float32x4_t a", - "float32x4_t b", - "float32x2_t v", - "const int lane" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "float32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vn.16B" }, - "v": {} + "b": { + "register": "Vm.16B" + } }, "Architectures": [ "v7", @@ -51043,35 +253709,26 @@ ], "instructions": [ [ - "RESULT[I]" + "SMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_lane_s16", + "name": "vminq_u16", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x4_t v", - "const int lane" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { "register": "Vn.8H" }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "b": { + "register": "Vm.8H" } }, "Architectures": [ @@ -51081,35 +253738,26 @@ ], "instructions": [ [ - "MLA" + "UMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_lane_s32", + "name": "vminq_u32", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "int32x2_t v", - "const int lane" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4S" }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "b": { + "register": "Vm.4S" } }, "Architectures": [ @@ -51119,35 +253767,26 @@ ], "instructions": [ [ - "MLA" + "UMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_lane_u16", + "name": "vminq_u8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16x4_t v", - "const int lane" + "uint8x16_t a", + 
"uint8x16_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vn.16B" }, "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Vm.16B" } }, "Architectures": [ @@ -51157,103 +253796,68 @@ ], "instructions": [ [ - "MLA" + "UMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_lane_u32", + "name": "vminv_f16", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x2_t v", - "const int lane" + "float16x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vn.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_laneq_f32", + "name": "vminv_f32", "arguments": [ - "float32x4_t a", - "float32x4_t b", - "float32x4_t v", - "const int lane" + "float32x2_t a" ], "return_type": { - "value": "float32x4_t" + "value": "float32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": {} + "a": { + "register": "Vn.2S" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "RESULT[I]" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_laneq_s16", + "name": "vminv_s16", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x8_t v", - "const int lane" + "int16x4_t a" ], "return_type": { - "value": "int16x8_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vn.4H" } }, "Architectures": [ @@ -51261,35 +253865,22 @@ ], "instructions": [ [ - "MLA" + "SMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vmlaq_laneq_s32", + "name": "vminv_s32", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "int32x4_t v", - "const int lane" + "int32x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vm.2S" } }, "Architectures": [ @@ -51297,35 +253888,22 @@ ], "instructions": [ [ - "MLA" + "SMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_laneq_u16", + "name": "vminv_s8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16x8_t v", - "const int lane" + "int8x8_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vn.8B" } }, "Architectures": [ @@ -51333,35 +253911,22 @@ ], "instructions": [ [ - "MLA" + "SMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_laneq_u32", + "name": "vminv_u16", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t v", - "const int lane" + "uint16x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vn.4H" } }, "Architectures": [ @@ -51369,376 +253934,266 @@ ], "instructions": [ [ - "MLA" + "UMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_n_f32", + "name": "vminv_u32", "arguments": [ - "float32x4_t a", - "float32x4_t b", - "float32_t c" + "uint32x2_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "N/A" - }, - "b": { - "register": "N/A" - }, - "c": { - "register": "N/A" + "register": "Vm.2S" } }, "Architectures": [ - "v7", - 
"A32", "A64" ], "instructions": [ [ - "RESULT[I]" + "UMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_n_s16", + "name": "vminv_u8", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16_t c" + "uint8x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vn.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "UMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_n_s32", + "name": "vminvq_f16", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "int32_t c" + "float16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_n_u16", + "name": "vminvq_f32", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16_t c" + "float32x4_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vn.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_n_u32", + "name": "vminvq_f64", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32_t c" + "float64x2_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_s16", + "name": 
"vminvq_s16", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x8_t c" + "int16x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { "register": "Vn.8H" - }, - "c": { - "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "SMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_s32", + "name": "vminvq_s32", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "int32x4_t c" + "int32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4S" - }, - "c": { - "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "SMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_s8", + "name": "vminvq_s8", "arguments": [ - "int8x16_t a", - "int8x16_t b", - "int8x16_t c" + "int8x16_t a" ], "return_type": { - "value": "int8x16_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "b": { "register": "Vn.16B" - }, - "c": { - "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "SMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_u16", + "name": "vminvq_u16", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16x8_t c" + "uint16x8_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { "register": "Vn.8H" - }, - "c": { - "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "UMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_u32", + "name": "vminvq_u32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" + "uint32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { 
"register": "Vn.4S" - }, - "c": { - "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "UMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_u8", + "name": "vminvq_u8", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", - "uint8x16_t c" + "uint8x16_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "b": { "register": "Vn.16B" - }, - "c": { - "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "UMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_f32", + "name": "vmla_f32", "arguments": [ "float32x2_t a", "float32x2_t b", @@ -51765,13 +254220,13 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_f64", + "name": "vmla_f64", "arguments": [ "float64x1_t a", "float64x1_t b", @@ -51796,13 +254251,13 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_lane_f32", + "name": "vmla_lane_f32", "arguments": [ "float32x2_t a", "float32x2_t b", @@ -51813,13 +254268,10 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, "lane": { "minimum": 0, "maximum": 1 - }, - "v": {} + } }, "Architectures": [ "v7", @@ -51828,13 +254280,13 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_lane_s16", + "name": "vmla_lane_s16", "arguments": [ "int16x4_t a", "int16x4_t b", @@ -51866,13 +254318,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_lane_s32", + "name": "vmla_lane_s32", "arguments": [ "int32x2_t a", "int32x2_t b", @@ -51904,13 +254356,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_lane_u16", + "name": "vmla_lane_u16", "arguments": [ "uint16x4_t a", "uint16x4_t b", @@ -51942,13 +254394,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vmls_lane_u32", + "name": "vmla_lane_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b", @@ -51980,13 +254432,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_laneq_f32", + "name": "vmla_laneq_f32", "arguments": [ "float32x2_t a", "float32x2_t b", @@ -51997,26 +254449,23 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, "lane": { "minimum": 0, "maximum": 3 - }, - "v": {} + } }, "Architectures": [ "A64" ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_laneq_s16", + "name": "vmla_laneq_s16", "arguments": [ "int16x4_t a", "int16x4_t b", @@ -52046,13 +254495,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_laneq_s32", + "name": "vmla_laneq_s32", "arguments": [ "int32x2_t a", "int32x2_t b", @@ -52082,13 +254531,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_laneq_u16", + "name": "vmla_laneq_u16", "arguments": [ "uint16x4_t a", "uint16x4_t b", @@ -52118,13 +254567,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_laneq_u32", + "name": "vmla_laneq_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b", @@ -52154,13 +254603,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_n_f32", + "name": "vmla_n_f32", "arguments": [ "float32x2_t a", "float32x2_t b", @@ -52187,13 +254636,13 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_n_s16", + "name": "vmla_n_s16", "arguments": [ "int16x4_t a", "int16x4_t b", @@ -52220,13 +254669,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_n_s32", + "name": "vmla_n_s32", "arguments": [ "int32x2_t a", "int32x2_t b", @@ -52253,13 +254702,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_n_u16", + "name": "vmla_n_u16", "arguments": [ "uint16x4_t a", 
"uint16x4_t b", @@ -52286,13 +254735,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_n_u32", + "name": "vmla_n_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b", @@ -52319,13 +254768,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_s16", + "name": "vmla_s16", "arguments": [ "int16x4_t a", "int16x4_t b", @@ -52352,13 +254801,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_s32", + "name": "vmla_s32", "arguments": [ "int32x2_t a", "int32x2_t b", @@ -52385,13 +254834,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_s8", + "name": "vmla_s8", "arguments": [ "int8x8_t a", "int8x8_t b", @@ -52418,13 +254867,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_u16", + "name": "vmla_u16", "arguments": [ "uint16x4_t a", "uint16x4_t b", @@ -52451,13 +254900,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_u32", + "name": "vmla_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b", @@ -52484,13 +254933,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_u8", + "name": "vmla_u8", "arguments": [ "uint8x8_t a", "uint8x8_t b", @@ -52517,13 +254966,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_lane_s16", + "name": "vmlal_high_lane_s16", "arguments": [ "int32x4_t a", "int16x8_t b", @@ -52553,13 +255002,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_lane_s32", + "name": "vmlal_high_lane_s32", "arguments": [ "int64x2_t a", "int32x4_t b", @@ -52589,13 +255038,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_lane_u16", + "name": "vmlal_high_lane_u16", "arguments": [ "uint32x4_t a", "uint16x8_t b", @@ -52625,13 +255074,13 @@ ], "instructions": [ [ - 
"UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_lane_u32", + "name": "vmlal_high_lane_u32", "arguments": [ "uint64x2_t a", "uint32x4_t b", @@ -52661,13 +255110,13 @@ ], "instructions": [ [ - "UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_laneq_s16", + "name": "vmlal_high_laneq_s16", "arguments": [ "int32x4_t a", "int16x8_t b", @@ -52697,13 +255146,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_laneq_s32", + "name": "vmlal_high_laneq_s32", "arguments": [ "int64x2_t a", "int32x4_t b", @@ -52733,13 +255182,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_laneq_u16", + "name": "vmlal_high_laneq_u16", "arguments": [ "uint32x4_t a", "uint16x8_t b", @@ -52769,13 +255218,13 @@ ], "instructions": [ [ - "UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_laneq_u32", + "name": "vmlal_high_laneq_u32", "arguments": [ "uint64x2_t a", "uint32x4_t b", @@ -52805,13 +255254,13 @@ ], "instructions": [ [ - "UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_n_s16", + "name": "vmlal_high_n_s16", "arguments": [ "int32x4_t a", "int16x8_t b", @@ -52836,13 +255285,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_n_s32", + "name": "vmlal_high_n_s32", "arguments": [ "int64x2_t a", "int32x4_t b", @@ -52867,13 +255316,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_n_u16", + "name": "vmlal_high_n_u16", "arguments": [ "uint32x4_t a", "uint16x8_t b", @@ -52898,13 +255347,13 @@ ], "instructions": [ [ - "UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_n_u32", + "name": "vmlal_high_n_u32", "arguments": [ "uint64x2_t a", "uint32x4_t b", @@ -52929,13 +255378,13 @@ ], "instructions": [ [ - "UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_s16", + "name": 
"vmlal_high_s16", "arguments": [ "int32x4_t a", "int16x8_t b", @@ -52960,13 +255409,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_s32", + "name": "vmlal_high_s32", "arguments": [ "int64x2_t a", "int32x4_t b", @@ -52991,13 +255440,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_s8", + "name": "vmlal_high_s8", "arguments": [ "int16x8_t a", "int8x16_t b", @@ -53022,13 +255471,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_u16", + "name": "vmlal_high_u16", "arguments": [ "uint32x4_t a", "uint16x8_t b", @@ -53053,13 +255502,13 @@ ], "instructions": [ [ - "UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_u32", + "name": "vmlal_high_u32", "arguments": [ "uint64x2_t a", "uint32x4_t b", @@ -53084,13 +255533,13 @@ ], "instructions": [ [ - "UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_u8", + "name": "vmlal_high_u8", "arguments": [ "uint16x8_t a", "uint8x16_t b", @@ -53115,13 +255564,13 @@ ], "instructions": [ [ - "UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_lane_s16", + "name": "vmlal_lane_s16", "arguments": [ "int32x4_t a", "int16x4_t b", @@ -53153,13 +255602,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_lane_s32", + "name": "vmlal_lane_s32", "arguments": [ "int64x2_t a", "int32x2_t b", @@ -53191,13 +255640,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_lane_u16", + "name": "vmlal_lane_u16", "arguments": [ "uint32x4_t a", "uint16x4_t b", @@ -53229,13 +255678,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_lane_u32", + "name": "vmlal_lane_u32", "arguments": [ "uint64x2_t a", "uint32x2_t b", @@ -53267,13 +255716,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vmlsl_laneq_s16", + "name": "vmlal_laneq_s16", "arguments": [ "int32x4_t a", "int16x4_t b", @@ -53303,13 +255752,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_laneq_s32", + "name": "vmlal_laneq_s32", "arguments": [ "int64x2_t a", "int32x2_t b", @@ -53339,13 +255788,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_laneq_u16", + "name": "vmlal_laneq_u16", "arguments": [ "uint32x4_t a", "uint16x4_t b", @@ -53375,13 +255824,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_laneq_u32", + "name": "vmlal_laneq_u32", "arguments": [ "uint64x2_t a", "uint32x2_t b", @@ -53411,13 +255860,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_n_s16", + "name": "vmlal_n_s16", "arguments": [ "int32x4_t a", "int16x4_t b", @@ -53444,13 +255893,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_n_s32", + "name": "vmlal_n_s32", "arguments": [ "int64x2_t a", "int32x2_t b", @@ -53477,13 +255926,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_n_u16", + "name": "vmlal_n_u16", "arguments": [ "uint32x4_t a", "uint16x4_t b", @@ -53510,13 +255959,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_n_u32", + "name": "vmlal_n_u32", "arguments": [ "uint64x2_t a", "uint32x2_t b", @@ -53543,13 +255992,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_s16", + "name": "vmlal_s16", "arguments": [ "int32x4_t a", "int16x4_t b", @@ -53576,13 +256025,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_s32", + "name": "vmlal_s32", "arguments": [ "int64x2_t a", "int32x2_t b", @@ -53609,13 +256058,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_s8", + "name": 
"vmlal_s8", "arguments": [ "int16x8_t a", "int8x8_t b", @@ -53642,13 +256091,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_u16", + "name": "vmlal_u16", "arguments": [ "uint32x4_t a", "uint16x4_t b", @@ -53675,13 +256124,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_u32", + "name": "vmlal_u32", "arguments": [ "uint64x2_t a", "uint32x2_t b", @@ -53708,13 +256157,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_u8", + "name": "vmlal_u8", "arguments": [ "uint16x8_t a", "uint8x8_t b", @@ -53741,13 +256190,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_f32", + "name": "vmlaq_f32", "arguments": [ "float32x4_t a", "float32x4_t b", @@ -53774,13 +256223,13 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_f64", + "name": "vmlaq_f64", "arguments": [ "float64x2_t a", "float64x2_t b", @@ -53805,13 +256254,13 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_lane_f32", + "name": "vmlaq_lane_f32", "arguments": [ "float32x4_t a", "float32x4_t b", @@ -53822,13 +256271,10 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, "lane": { "minimum": 0, "maximum": 1 - }, - "v": {} + } }, "Architectures": [ "v7", @@ -53837,13 +256283,13 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_lane_s16", + "name": "vmlaq_lane_s16", "arguments": [ "int16x8_t a", "int16x8_t b", @@ -53875,13 +256321,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_lane_s32", + "name": "vmlaq_lane_s32", "arguments": [ "int32x4_t a", "int32x4_t b", @@ -53913,13 +256359,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_lane_u16", + "name": "vmlaq_lane_u16", "arguments": [ "uint16x8_t a", 
"uint16x8_t b", @@ -53951,13 +256397,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_lane_u32", + "name": "vmlaq_lane_u32", "arguments": [ "uint32x4_t a", "uint32x4_t b", @@ -53989,13 +256435,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_laneq_f32", + "name": "vmlaq_laneq_f32", "arguments": [ "float32x4_t a", "float32x4_t b", @@ -54006,26 +256452,23 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, "lane": { "minimum": 0, "maximum": 3 - }, - "v": {} + } }, "Architectures": [ "A64" ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_laneq_s16", + "name": "vmlaq_laneq_s16", "arguments": [ "int16x8_t a", "int16x8_t b", @@ -54055,13 +256498,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_laneq_s32", + "name": "vmlaq_laneq_s32", "arguments": [ "int32x4_t a", "int32x4_t b", @@ -54091,13 +256534,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_laneq_u16", + "name": "vmlaq_laneq_u16", "arguments": [ "uint16x8_t a", "uint16x8_t b", @@ -54127,13 +256570,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_laneq_u32", + "name": "vmlaq_laneq_u32", "arguments": [ "uint32x4_t a", "uint32x4_t b", @@ -54163,547 +256606,30 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_n_f32", + "name": "vmlaq_n_f32", "arguments": [ "float32x4_t a", "float32x4_t b", - "float32_t c" - ], - "return_type": { - "value": "float32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "N/A" - }, - "b": { - "register": "N/A" - }, - "c": { - "register": "N/A" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "RESULT[I]" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmlsq_n_s16", - "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16_t c" - 
], - "return_type": { - "value": "int16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.H[0]" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmlsq_n_s32", - "arguments": [ - "int32x4_t a", - "int32x4_t b", - "int32_t c" - ], - "return_type": { - "value": "int32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.S[0]" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmlsq_n_u16", - "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16_t c" - ], - "return_type": { - "value": "uint16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.H[0]" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmlsq_n_u32", - "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32_t c" - ], - "return_type": { - "value": "uint32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.S[0]" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmlsq_s16", - "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x8_t c" - ], - "return_type": { - "value": "int16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.8H" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmlsq_s32", - "arguments": [ - 
"int32x4_t a", - "int32x4_t b", - "int32x4_t c" - ], - "return_type": { - "value": "int32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.4S" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmlsq_s8", - "arguments": [ - "int8x16_t a", - "int8x16_t b", - "int8x16_t c" - ], - "return_type": { - "value": "int8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.16B" - }, - "c": { - "register": "Vm.16B" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmlsq_u16", - "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16x8_t c" - ], - "return_type": { - "value": "uint16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.8H" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmlsq_u32", - "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" - ], - "return_type": { - "value": "uint32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.4S" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmlsq_u8", - "arguments": [ - "uint8x16_t a", - "uint8x16_t b", - "uint8x16_t c" - ], - "return_type": { - "value": "uint8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.16B" - }, - "c": { - "register": "Vm.16B" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - }, - { - "SIMD_ISA": "Neon", 
- "name": "vmmlaq_s32", - "arguments": [ - "int32x4_t r", - "int8x16_t a", - "int8x16_t b" - ], - "return_type": { - "value": "int32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" - }, - "r": { - "register": "Vd.4S" - } - }, - "Architectures": [ - "A32", - "A64" - ], - "instructions": [ - [ - "SMMLA" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmmlaq_u32", - "arguments": [ - "uint32x4_t r", - "uint8x16_t a", - "uint8x16_t b" - ], - "return_type": { - "value": "uint32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" - }, - "r": { - "register": "Vd.4S" - } - }, - "Architectures": [ - "A32", - "A64" - ], - "instructions": [ - [ - "UMMLA" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmov_n_f16", - "arguments": [ - "float16_t value" - ], - "return_type": { - "value": "float16x4_t" - }, - "Arguments_Preparation": { - "value": { - "register": "rn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "DUP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmov_n_f32", - "arguments": [ - "float32_t value" - ], - "return_type": { - "value": "float32x2_t" - }, - "Arguments_Preparation": { - "value": { - "register": "rn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "DUP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmov_n_f64", - "arguments": [ - "float64_t value" - ], - "return_type": { - "value": "float64x1_t" - }, - "Arguments_Preparation": { - "value": { - "register": "rn" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "DUP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmov_n_p16", - "arguments": [ - "poly16_t value" - ], - "return_type": { - "value": "poly16x4_t" - }, - "Arguments_Preparation": { - "value": { - "register": "rn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "DUP" - ] - ] - }, - { - 
"SIMD_ISA": "Neon", - "name": "vmov_n_p8", - "arguments": [ - "poly8_t value" + "float32_t c" ], "return_type": { - "value": "poly8x8_t" + "value": "float32x4_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "N/A" + }, + "b": { + "register": "N/A" + }, + "c": { + "register": "N/A" } }, "Architectures": [ @@ -54713,22 +256639,30 @@ ], "instructions": [ [ - "DUP" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmov_n_s16", + "name": "vmlaq_n_s16", "arguments": [ - "int16_t value" + "int16x8_t a", + "int16x8_t b", + "int16_t c" ], "return_type": { - "value": "int16x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ @@ -54738,22 +256672,30 @@ ], "instructions": [ [ - "DUP" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmov_n_s32", + "name": "vmlaq_n_s32", "arguments": [ - "int32_t value" + "int32x4_t a", + "int32x4_t b", + "int32_t c" ], "return_type": { - "value": "int32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ @@ -54763,22 +256705,30 @@ ], "instructions": [ [ - "DUP" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmov_n_s64", + "name": "vmlaq_n_u16", "arguments": [ - "int64_t value" + "uint16x8_t a", + "uint16x8_t b", + "uint16_t c" ], "return_type": { - "value": "int64x1_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ @@ -54788,22 +256738,30 @@ ], "instructions": [ [ - "DUP" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmov_n_s8", + "name": "vmlaq_n_u32", "arguments": [ - "int8_t value" + "uint32x4_t a", + 
"uint32x4_t b", + "uint32_t c" ], "return_type": { - "value": "int8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ @@ -54813,22 +256771,30 @@ ], "instructions": [ [ - "DUP" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmov_n_u16", + "name": "vmlaq_s16", "arguments": [ - "uint16_t value" + "int16x8_t a", + "int16x8_t b", + "int16x8_t c" ], "return_type": { - "value": "uint16x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" } }, "Architectures": [ @@ -54838,22 +256804,30 @@ ], "instructions": [ [ - "DUP" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmov_n_u32", + "name": "vmlaq_s32", "arguments": [ - "uint32_t value" + "int32x4_t a", + "int32x4_t b", + "int32x4_t c" ], "return_type": { - "value": "uint32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" } }, "Architectures": [ @@ -54863,22 +256837,30 @@ ], "instructions": [ [ - "DUP" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmov_n_u64", + "name": "vmlaq_s8", "arguments": [ - "uint64_t value" + "int8x16_t a", + "int8x16_t b", + "int8x16_t c" ], "return_type": { - "value": "uint64x1_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" } }, "Architectures": [ @@ -54888,22 +256870,30 @@ ], "instructions": [ [ - "DUP" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmov_n_u8", + "name": "vmlaq_u16", "arguments": [ - "uint8_t value" + "uint16x8_t a", + "uint16x8_t b", + "uint16x8_t c" ], "return_type": { - "value": 
"uint8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" } }, "Architectures": [ @@ -54913,91 +256903,129 @@ ], "instructions": [ [ - "DUP" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_high_s16", + "name": "vmlaq_u32", "arguments": [ - "int16x8_t a" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSHLL2" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_high_s32", + "name": "vmlaq_u8", "arguments": [ - "int32x4_t a" + "uint8x16_t a", + "uint8x16_t b", + "uint8x16_t c" ], "return_type": { - "value": "int64x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSHLL2" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_high_s8", + "name": "vmls_f32", "arguments": [ - "int8x16_t a" + "float32x2_t a", + "float32x2_t b", + "float32x2_t c" ], "return_type": { - "value": "int16x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "N/A" + }, + "b": { + "register": "N/A" + }, + "c": { + "register": "N/A" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSHLL2" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_high_u16", + "name": "vmls_f64", "arguments": [ - "uint16x8_t a" + "float64x1_t a", + "float64x1_t b", + "float64x1_t c" ], "return_type": { - "value": "uint32x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": 
"Vn.8H" + "register": "N/A" + }, + "b": { + "register": "N/A" + }, + "c": { + "register": "N/A" } }, "Architectures": [ @@ -55005,68 +257033,102 @@ ], "instructions": [ [ - "USHLL2" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_high_u32", + "name": "vmls_lane_f32", "arguments": [ - "uint32x4_t a" + "float32x2_t a", + "float32x2_t b", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "float32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 1 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USHLL2" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_high_u8", + "name": "vmls_lane_s16", "arguments": [ - "uint8x16_t a" + "int16x4_t a", + "int16x4_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USHLL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_s16", + "name": "vmls_lane_s32", "arguments": [ - "int16x4_t a" + "int32x2_t a", + "int32x2_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -55076,22 +257138,35 @@ ], "instructions": [ [ - "SSHLL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_s32", + "name": "vmls_lane_u16", "arguments": [ - "int32x2_t a" + "uint16x4_t a", + "uint16x4_t b", + "uint16x4_t v", + "const int lane" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x4_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -55101,22 +257176,35 @@ ], "instructions": [ [ - "SSHLL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_s8", + "name": "vmls_lane_u32", "arguments": [ - "int8x8_t a" + "uint32x2_t a", + "uint32x2_t b", + "uint32x2_t v", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -55126,159 +257214,201 @@ ], "instructions": [ [ - "SSHLL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_u16", + "name": "vmls_laneq_f32", "arguments": [ - "uint16x4_t a" + "float32x2_t a", + "float32x2_t b", + "float32x4_t v", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 3 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USHLL" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_u32", + "name": "vmls_laneq_s16", "arguments": [ - "uint32x2_t a" + "int16x4_t a", + "int16x4_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USHLL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_u8", + "name": "vmls_laneq_s32", "arguments": [ - "uint8x8_t a" + "int32x2_t a", + "int32x2_t b", + "int32x4_t v", + "const int 
lane" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USHLL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_high_s16", + "name": "vmls_laneq_u16", "arguments": [ - "int8x8_t r", - "int16x8_t a" + "uint16x4_t a", + "uint16x4_t b", + "uint16x8_t v", + "const int lane" ], "return_type": { - "value": "int8x16_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.4H" }, - "r": { - "register": "Vd.8B" + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "XTN2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_high_s32", + "name": "vmls_laneq_u32", "arguments": [ - "int16x4_t r", - "int32x4_t a" + "uint32x2_t a", + "uint32x2_t b", + "uint32x4_t v", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.2S" }, - "r": { - "register": "Vd.4H" + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "XTN2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_high_s64", + "name": "vmls_n_f32", "arguments": [ - "int32x2_t r", - "int64x2_t a" + "float32x2_t a", + "float32x2_t b", + "float32_t c" ], "return_type": { - "value": "int32x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "N/A" }, - "r": { - "register": "Vd.2S" + "b": { + "register": "N/A" + }, + "c": { + "register": "N/A" } }, "Architectures": [ @@ 
-55288,26 +257418,30 @@ ], "instructions": [ [ - "XTN2" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_high_u16", + "name": "vmls_n_s16", "arguments": [ - "uint8x8_t r", - "uint16x8_t a" + "int16x4_t a", + "int16x4_t b", + "int16_t c" ], "return_type": { - "value": "uint8x16_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.4H" }, - "r": { - "register": "Vd.8B" + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ @@ -55317,26 +257451,30 @@ ], "instructions": [ [ - "XTN2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_high_u32", + "name": "vmls_n_s32", "arguments": [ - "uint16x4_t r", - "uint32x4_t a" + "int32x2_t a", + "int32x2_t b", + "int32_t c" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.2S" }, - "r": { - "register": "Vd.4H" + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ @@ -55346,26 +257484,30 @@ ], "instructions": [ [ - "XTN2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_high_u64", + "name": "vmls_n_u16", "arguments": [ - "uint32x2_t r", - "uint64x2_t a" + "uint16x4_t a", + "uint16x4_t b", + "uint16_t c" ], "return_type": { - "value": "uint32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.4H" }, - "r": { - "register": "Vd.2S" + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ @@ -55375,22 +257517,30 @@ ], "instructions": [ [ - "XTN2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_s16", + "name": "vmls_n_u32", "arguments": [ - "int16x8_t a" + "uint32x2_t a", + "uint32x2_t b", + "uint32_t c" ], "return_type": { - "value": "int8x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + 
"register": "Vm.S[0]" } }, "Architectures": [ @@ -55400,22 +257550,30 @@ ], "instructions": [ [ - "XTN" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_s32", + "name": "vmls_s16", "arguments": [ - "int32x4_t a" + "int16x4_t a", + "int16x4_t b", + "int16x4_t c" ], "return_type": { "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" } }, "Architectures": [ @@ -55425,22 +257583,30 @@ ], "instructions": [ [ - "XTN" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_s64", + "name": "vmls_s32", "arguments": [ - "int64x2_t a" + "int32x2_t a", + "int32x2_t b", + "int32x2_t c" ], "return_type": { "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" } }, "Architectures": [ @@ -55450,22 +257616,30 @@ ], "instructions": [ [ - "XTN" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_u16", + "name": "vmls_s8", "arguments": [ - "uint16x8_t a" + "int8x8_t a", + "int8x8_t b", + "int8x8_t c" ], "return_type": { - "value": "uint8x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" } }, "Architectures": [ @@ -55475,22 +257649,30 @@ ], "instructions": [ [ - "XTN" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_u32", + "name": "vmls_u16", "arguments": [ - "uint32x4_t a" + "uint16x4_t a", + "uint16x4_t b", + "uint16x4_t c" ], "return_type": { "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" } }, "Architectures": [ @@ -55500,22 +257682,30 @@ ], "instructions": [ [ - "XTN" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_u64", + "name": "vmls_u32", "arguments": [ - "uint64x2_t a" 
+ "uint32x2_t a", + "uint32x2_t b", + "uint32x2_t c" ], "return_type": { "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" } }, "Architectures": [ @@ -55525,22 +257715,30 @@ ], "instructions": [ [ - "XTN" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_f16", + "name": "vmls_u8", "arguments": [ - "float16_t value" + "uint8x8_t a", + "uint8x8_t b", + "uint8x8_t c" ], "return_type": { - "value": "float16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" } }, "Architectures": [ @@ -55550,47 +257748,71 @@ ], "instructions": [ [ - "DUP" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_f32", + "name": "vmlsl_high_lane_s16", "arguments": [ - "float32_t value" + "int32x4_t a", + "int16x8_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "float32x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "SMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_f64", + "name": "vmlsl_high_lane_s32", "arguments": [ - "float64_t value" + "int64x2_t a", + "int32x4_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "float64x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -55598,333 +257820,432 @@ ], "instructions": [ [ - "DUP" + "SMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vmovq_n_p16", + "name": "vmlsl_high_lane_u16", "arguments": [ - "poly16_t value" + "uint32x4_t a", + "uint16x8_t b", + "uint16x4_t v", + "const int lane" ], "return_type": { - "value": "poly16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_p8", + "name": "vmlsl_high_lane_u32", "arguments": [ - "poly8_t value" + "uint64x2_t a", + "uint32x4_t b", + "uint32x2_t v", + "const int lane" ], "return_type": { - "value": "poly8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_s16", + "name": "vmlsl_high_laneq_s16", "arguments": [ - "int16_t value" + "int32x4_t a", + "int16x8_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "SMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_s32", + "name": "vmlsl_high_laneq_s32", "arguments": [ - "int32_t value" + "int64x2_t a", + "int32x4_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.2D" + 
}, + "b": { + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "SMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_s64", + "name": "vmlsl_high_laneq_u16", "arguments": [ - "int64_t value" + "uint32x4_t a", + "uint16x8_t b", + "uint16x8_t v", + "const int lane" ], "return_type": { - "value": "int64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_s8", + "name": "vmlsl_high_laneq_u32", "arguments": [ - "int8_t value" + "uint64x2_t a", + "uint32x4_t b", + "uint32x4_t v", + "const int lane" ], "return_type": { - "value": "int8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_u16", + "name": "vmlsl_high_n_s16", "arguments": [ - "uint16_t value" + "int32x4_t a", + "int16x8_t b", + "int16_t c" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "SMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_u32", + "name": "vmlsl_high_n_s32", "arguments": [ - "uint32_t value" + "int64x2_t a", + "int32x4_t b", + "int32_t c" ], 
"return_type": { - "value": "uint32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "SMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_u64", + "name": "vmlsl_high_n_u16", "arguments": [ - "uint64_t value" + "uint32x4_t a", + "uint16x8_t b", + "uint16_t c" ], "return_type": { - "value": "uint64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_u8", + "name": "vmlsl_high_n_u32", "arguments": [ - "uint8_t value" + "uint64x2_t a", + "uint32x4_t b", + "uint32_t c" ], "return_type": { - "value": "uint8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_f16", + "name": "vmlsl_high_s16", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "int32x4_t a", + "int16x8_t b", + "int16x8_t c" ], "return_type": { - "value": "float16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.4S" }, "b": { - "register": "Vm.4H" + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMUL" + "SMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_f32", + "name": "vmlsl_high_s32", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "int64x2_t a", + "int32x4_t b", + "int32x4_t c" ], 
"return_type": { - "value": "float32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.2D" }, "b": { - "register": "Vm.2S" + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FMUL" + "SMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_f64", + "name": "vmlsl_high_s8", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "int16x8_t a", + "int8x16_t b", + "int8x16_t c" ], "return_type": { - "value": "float64x1_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.8H" }, "b": { - "register": "Dm" + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" } }, "Architectures": [ @@ -55932,98 +258253,92 @@ ], "instructions": [ [ - "FMUL" + "SMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_lane_f16", + "name": "vmlsl_high_u16", "arguments": [ - "float16x4_t a", - "float16x4_t v", - "const int lane" + "uint32x4_t a", + "uint16x8_t b", + "uint16x8_t c" ], "return_type": { - "value": "float16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.4S" }, - "lane": { - "minimum": 0, - "maximum": 3 + "b": { + "register": "Vn.8H" }, - "v": { - "register": "Vm.4H" + "c": { + "register": "Vm.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMUL" + "UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_lane_f32", + "name": "vmlsl_high_u32", "arguments": [ - "float32x2_t a", - "float32x2_t v", - "const int lane" + "uint64x2_t a", + "uint32x4_t b", + "uint32x4_t c" ], "return_type": { - "value": "float32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.2D" }, - "lane": { - "minimum": 0, - "maximum": 1 + "b": { + "register": "Vn.4S" }, - "v": { - "register": "Vm.2S" + "c": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FMUL" + 
"UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_lane_f64", + "name": "vmlsl_high_u8", "arguments": [ - "float64x1_t a", - "float64x1_t v", - "const int lane" + "uint16x8_t a", + "uint8x16_t b", + "uint8x16_t c" ], "return_type": { - "value": "float64x1_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.8H" }, - "lane": { - "minimum": 0, - "maximum": 0 + "b": { + "register": "Vn.16B" }, - "v": { - "register": "Vm.1D" + "c": { + "register": "Vm.16B" } }, "Architectures": [ @@ -56031,23 +258346,27 @@ ], "instructions": [ [ - "FMUL" + "UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_lane_s16", + "name": "vmlsl_lane_s16", "arguments": [ - "int16x4_t a", + "int32x4_t a", + "int16x4_t b", "int16x4_t v", "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.4S" + }, + "b": { "register": "Vn.4H" }, "lane": { @@ -56065,23 +258384,27 @@ ], "instructions": [ [ - "MUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_lane_s32", + "name": "vmlsl_lane_s32", "arguments": [ - "int32x2_t a", + "int64x2_t a", + "int32x2_t b", "int32x2_t v", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.2D" + }, + "b": { "register": "Vn.2S" }, "lane": { @@ -56099,23 +258422,27 @@ ], "instructions": [ [ - "MUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_lane_u16", + "name": "vmlsl_lane_u16", "arguments": [ - "uint16x4_t a", + "uint32x4_t a", + "uint16x4_t b", "uint16x4_t v", "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.4S" + }, + "b": { "register": "Vn.4H" }, "lane": { @@ -56133,23 +258460,27 @@ ], "instructions": [ [ - "MUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_lane_u32", + "name": "vmlsl_lane_u32", "arguments": [ - "uint32x2_t a", + 
"uint64x2_t a", + "uint32x2_t b", "uint32x2_t v", "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.2D" + }, + "b": { "register": "Vn.2S" }, "lane": { @@ -56167,23 +258498,27 @@ ], "instructions": [ [ - "MUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_laneq_f16", + "name": "vmlsl_laneq_s16", "arguments": [ - "float16x4_t a", - "float16x8_t v", + "int32x4_t a", + "int16x4_t b", + "int16x8_t v", "const int lane" ], "return_type": { - "value": "float16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.4S" + }, + "b": { "register": "Vn.4H" }, "lane": { @@ -56199,23 +258534,27 @@ ], "instructions": [ [ - "FMUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_laneq_f32", + "name": "vmlsl_laneq_s32", "arguments": [ - "float32x2_t a", - "float32x4_t v", + "int64x2_t a", + "int32x2_t b", + "int32x4_t v", "const int lane" ], "return_type": { - "value": "float32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.2D" + }, + "b": { "register": "Vn.2S" }, "lane": { @@ -56231,55 +258570,27 @@ ], "instructions": [ [ - "FMUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_laneq_f64", + "name": "vmlsl_laneq_u16", "arguments": [ - "float64x1_t a", - "float64x2_t v", + "uint32x4_t a", + "uint16x4_t b", + "uint16x8_t v", "const int lane" ], "return_type": { - "value": "float64x1_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Vd.4S" }, - "v": { - "register": "Vm.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FMUL" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmul_laneq_s16", - "arguments": [ - "int16x4_t a", - "int16x8_t v", - "const int lane" - ], - "return_type": { - "value": "int16x4_t" - }, - "Arguments_Preparation": { - "a": { + "b": { "register": "Vn.4H" }, "lane": { @@ 
-56295,23 +258606,27 @@ ], "instructions": [ [ - "MUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_laneq_s32", + "name": "vmlsl_laneq_u32", "arguments": [ - "int32x2_t a", - "int32x4_t v", + "uint64x2_t a", + "uint32x2_t b", + "uint32x4_t v", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.2D" + }, + "b": { "register": "Vn.2S" }, "lane": { @@ -56327,117 +258642,128 @@ ], "instructions": [ [ - "MUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_laneq_u16", + "name": "vmlsl_n_s16", "arguments": [ - "uint16x4_t a", - "uint16x8_t v", - "const int lane" + "int32x4_t a", + "int16x4_t b", + "int16_t c" ], "return_type": { - "value": "uint16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.4S" }, - "lane": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Vn.4H" }, - "v": { - "register": "Vm.8H" + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_laneq_u32", + "name": "vmlsl_n_s32", "arguments": [ - "uint32x2_t a", - "uint32x4_t v", - "const int lane" + "int64x2_t a", + "int32x2_t b", + "int32_t c" ], "return_type": { - "value": "uint32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.2D" }, - "lane": { - "minimum": 0, - "maximum": 3 + "b": { + "register": "Vn.2S" }, - "v": { - "register": "Vm.4S" + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_n_f16", + "name": "vmlsl_n_u16", "arguments": [ - "float16x4_t a", - "float16_t n" + "uint32x4_t a", + "uint16x4_t b", + "uint16_t c" ], "return_type": { - "value": "float16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.4S" + }, + "b": { 
"register": "Vn.4H" }, - "n": { + "c": { "register": "Vm.H[0]" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FMUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_n_f32", + "name": "vmlsl_n_u32", "arguments": [ - "float32x2_t a", - "float32_t b" + "uint64x2_t a", + "uint32x2_t b", + "uint32_t c" ], "return_type": { - "value": "float32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.2D" }, "b": { + "register": "Vn.2S" + }, + "c": { "register": "Vm.S[0]" } }, @@ -56448,53 +258774,63 @@ ], "instructions": [ [ - "FMUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_n_f64", + "name": "vmlsl_s16", "arguments": [ - "float64x1_t a", - "float64_t b" + "int32x4_t a", + "int16x4_t b", + "int16x4_t c" ], "return_type": { - "value": "float64x1_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" }, "b": { - "register": "Vm.D[0]" + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_n_s16", + "name": "vmlsl_s32", "arguments": [ - "int16x4_t a", - "int16_t b" + "int64x2_t a", + "int32x2_t b", + "int32x2_t c" ], "return_type": { - "value": "int16x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.2D" }, "b": { - "register": "Vm.H[0]" + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" } }, "Architectures": [ @@ -56504,26 +258840,30 @@ ], "instructions": [ [ - "MUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_n_s32", + "name": "vmlsl_s8", "arguments": [ - "int32x2_t a", - "int32_t b" + "int16x8_t a", + "int8x8_t b", + "int8x8_t c" ], "return_type": { - "value": "int32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.8H" }, "b": { - "register": "Vm.S[0]" + "register": "Vn.8B" 
+ }, + "c": { + "register": "Vm.8B" } }, "Architectures": [ @@ -56533,26 +258873,30 @@ ], "instructions": [ [ - "MUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_n_u16", + "name": "vmlsl_u16", "arguments": [ - "uint16x4_t a", - "uint16_t b" + "uint32x4_t a", + "uint16x4_t b", + "uint16x4_t c" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.4S" }, "b": { - "register": "Vm.H[0]" + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" } }, "Architectures": [ @@ -56562,26 +258906,30 @@ ], "instructions": [ [ - "MUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_n_u32", + "name": "vmlsl_u32", "arguments": [ - "uint32x2_t a", - "uint32_t b" + "uint64x2_t a", + "uint32x2_t b", + "uint32x2_t c" ], "return_type": { - "value": "uint32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.2D" }, "b": { - "register": "Vm.S[0]" + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" } }, "Architectures": [ @@ -56591,25 +258939,29 @@ ], "instructions": [ [ - "MUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_p8", + "name": "vmlsl_u8", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "uint16x8_t a", + "uint8x8_t b", + "uint8x8_t c" ], "return_type": { - "value": "poly8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vd.8H" }, "b": { + "register": "Vn.8B" + }, + "c": { "register": "Vm.8B" } }, @@ -56620,26 +258972,30 @@ ], "instructions": [ [ - "PMUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_s16", + "name": "vmlsq_f32", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "float32x4_t a", + "float32x4_t b", + "float32x4_t c" ], "return_type": { - "value": "int16x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "N/A" }, "b": { - "register": "Vm.4H" + "register": "N/A" + }, + "c": { + 
"register": "N/A" } }, "Architectures": [ @@ -56649,55 +259005,57 @@ ], "instructions": [ [ - "MUL" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_s32", + "name": "vmlsq_f64", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "float64x2_t a", + "float64x2_t b", + "float64x2_t c" ], "return_type": { - "value": "int32x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "N/A" }, "b": { - "register": "Vm.2S" + "register": "N/A" + }, + "c": { + "register": "N/A" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MUL" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_s8", + "name": "vmlsq_lane_f32", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "float32x4_t a", + "float32x4_t b", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "float32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 1 } }, "Architectures": [ @@ -56707,25 +259065,34 @@ ], "instructions": [ [ - "MUL" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_u16", + "name": "vmlsq_lane_s16", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "int16x8_t a", + "int16x8_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.8H" }, "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { "register": "Vm.4H" } }, @@ -56736,25 +259103,34 @@ ], "instructions": [ [ - "MUL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_u32", + "name": "vmlsq_lane_s32", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "int32x4_t a", + "int32x4_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.4S" }, "b": { 
+ "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { "register": "Vm.2S" } }, @@ -56765,26 +259141,35 @@ ], "instructions": [ [ - "MUL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_u8", + "name": "vmlsq_lane_u16", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "uint16x8_t a", + "uint16x8_t b", + "uint16x4_t v", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vd.8H" }, "b": { - "register": "Vm.8B" + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -56794,63 +259179,64 @@ ], "instructions": [ [ - "MUL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmuld_lane_f64", + "name": "vmlsq_lane_u32", "arguments": [ - "float64_t a", - "float64x1_t v", + "uint32x4_t a", + "uint32x4_t b", + "uint32x2_t v", "const int lane" ], "return_type": { - "value": "float64_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" }, "lane": { "minimum": 0, - "maximum": 0 + "maximum": 1 }, "v": { - "register": "Vm.1D" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMUL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmuld_laneq_f64", + "name": "vmlsq_laneq_f32", "arguments": [ - "float64_t a", - "float64x2_t v", + "float32x4_t a", + "float32x4_t b", + "float32x4_t v", "const int lane" ], "return_type": { - "value": "float64_t" + "value": "float32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" - }, "lane": { "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2D" + "maximum": 3 } }, "Architectures": [ @@ -56858,59 +259244,71 @@ ], "instructions": [ [ - "FMUL" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulh_f16", + "name": "vmlsq_laneq_s16", "arguments": [ - "float16_t a", - "float16_t b" + 
"int16x8_t a", + "int16x8_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "float16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.8H" }, "b": { - "register": "Hm" + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMUL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulh_lane_f16", + "name": "vmlsq_laneq_s32", "arguments": [ - "float16_t a", - "float16x4_t v", + "int32x4_t a", + "int32x4_t b", + "int32x4_t v", "const int lane" ], "return_type": { - "value": "float16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" }, "lane": { "minimum": 0, "maximum": 3 }, "v": { - "register": "Vm.4H" + "register": "Vm.4S" } }, "Architectures": [ @@ -56918,24 +259316,28 @@ ], "instructions": [ [ - "FMUL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulh_laneq_f16", + "name": "vmlsq_laneq_u16", "arguments": [ - "float16_t a", - "float16x8_t v", + "uint16x8_t a", + "uint16x8_t b", + "uint16x8_t v", "const int lane" ], "return_type": { - "value": "float16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" }, "lane": { "minimum": 0, @@ -56950,31 +259352,35 @@ ], "instructions": [ [ - "FMUL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_lane_s16", + "name": "vmlsq_laneq_u32", "arguments": [ - "int16x8_t a", - "int16x4_t v", + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t v", "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" }, "lane": { "minimum": 0, "maximum": 3 }, "v": { - "register": "Vm.4H" + "register": "Vm.4S" } }, "Architectures": [ @@ 
-56982,358 +259388,393 @@ ], "instructions": [ [ - "SMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_lane_s32", + "name": "vmlsq_n_f32", "arguments": [ - "int32x4_t a", - "int32x2_t v", - "const int lane" + "float32x4_t a", + "float32x4_t b", + "float32_t c" ], "return_type": { - "value": "int64x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "N/A" }, - "lane": { - "minimum": 0, - "maximum": 1 + "b": { + "register": "N/A" }, - "v": { - "register": "Vm.2S" + "c": { + "register": "N/A" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMULL2" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_lane_u16", + "name": "vmlsq_n_s16", "arguments": [ - "uint16x8_t a", - "uint16x4_t v", - "const int lane" + "int16x8_t a", + "int16x8_t b", + "int16_t c" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.8H" }, - "lane": { - "minimum": 0, - "maximum": 3 + "b": { + "register": "Vn.8H" }, - "v": { - "register": "Vm.4H" + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_lane_u32", + "name": "vmlsq_n_s32", "arguments": [ - "uint32x4_t a", - "uint32x2_t v", - "const int lane" + "int32x4_t a", + "int32x4_t b", + "int32_t c" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4S" }, - "lane": { - "minimum": 0, - "maximum": 1 + "b": { + "register": "Vn.4S" }, - "v": { - "register": "Vm.2S" + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_laneq_s16", + "name": "vmlsq_n_u16", "arguments": [ - "int16x8_t a", - "int16x8_t v", - "const int lane" + "uint16x8_t a", 
+ "uint16x8_t b", + "uint16_t c" ], "return_type": { - "value": "int32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.8H" }, - "lane": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Vn.8H" }, - "v": { - "register": "Vm.8H" + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_laneq_s32", + "name": "vmlsq_n_u32", "arguments": [ - "int32x4_t a", - "int32x4_t v", - "const int lane" + "uint32x4_t a", + "uint32x4_t b", + "uint32_t c" ], "return_type": { - "value": "int64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4S" }, - "lane": { - "minimum": 0, - "maximum": 3 + "b": { + "register": "Vn.4S" }, - "v": { - "register": "Vm.4S" + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_laneq_u16", + "name": "vmlsq_s16", "arguments": [ - "uint16x8_t a", - "uint16x8_t v", - "const int lane" + "int16x8_t a", + "int16x8_t b", + "int16x8_t c" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.8H" }, - "lane": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Vn.8H" }, - "v": { + "c": { "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_laneq_u32", + "name": "vmlsq_s32", "arguments": [ - "uint32x4_t a", - "uint32x4_t v", - "const int lane" + "int32x4_t a", + "int32x4_t b", + "int32x4_t c" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4S" }, - "lane": { - "minimum": 0, - "maximum": 3 + "b": { + "register": "Vn.4S" }, - 
"v": { + "c": { "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_n_s16", + "name": "vmlsq_s8", "arguments": [ - "int16x8_t a", - "int16_t b" + "int8x16_t a", + "int8x16_t b", + "int8x16_t c" ], "return_type": { - "value": "int32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.16B" }, "b": { - "register": "Vm.H[0]" + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_n_s32", + "name": "vmlsq_u16", "arguments": [ - "int32x4_t a", - "int32_t b" + "uint16x8_t a", + "uint16x8_t b", + "uint16x8_t c" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.8H" }, "b": { - "register": "Vm.S[0]" + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_n_u16", + "name": "vmlsq_u32", "arguments": [ - "uint16x8_t a", - "uint16_t b" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" ], "return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.4S" }, "b": { - "register": "Vm.H[0]" + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_n_u32", + "name": "vmlsq_u8", "arguments": [ - "uint32x4_t a", - "uint32_t b" + "uint8x16_t a", + "uint8x16_t b", + "uint8x16_t c" ], "return_type": { - "value": "uint64x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.16B" }, "b": { - "register": "Vm.S[0]" + 
"register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_p64", + "name": "vmmlaq_s32", "arguments": [ - "poly64x2_t a", - "poly64x2_t b" + "int32x4_t r", + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "poly128_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.16B" }, "b": { - "register": "Vm.2D" + "register": "Vm.16B" + }, + "r": { + "register": "Vd.4S" } }, "Architectures": [ @@ -57342,19 +259783,20 @@ ], "instructions": [ [ - "PMULL2" + "SMMLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_p8", + "name": "vmmlaq_u32", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "uint32x4_t r", + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { @@ -57362,200 +259804,231 @@ }, "b": { "register": "Vm.16B" + }, + "r": { + "register": "Vd.4S" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "PMULL2" + "UMMLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_s16", + "name": "vmov_n_f16", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "float16_t value" ], "return_type": { - "value": "int32x4_t" + "value": "float16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMULL2" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_s32", + "name": "vmov_n_f32", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "float32_t value" ], "return_type": { - "value": "int64x2_t" + "value": "float32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMULL2" + "DUP" 
] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_s8", + "name": "vmov_n_f64", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "float64_t value" ], "return_type": { - "value": "int16x8_t" + "value": "float64x1_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "value": { + "register": "rn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vmov_n_p16", + "arguments": [ + "poly16_t value" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vmov_n_p8", + "arguments": [ + "poly8_t value" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMULL2" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_u16", + "name": "vmov_n_s16", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "int16_t value" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_u32", + "name": "vmov_n_s32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int32_t value" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_u8", + "name": "vmov_n_s64", "arguments": [ - 
"uint8x16_t a", - "uint8x16_t b" + "int64_t value" ], "return_type": { - "value": "uint16x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_lane_s16", + "name": "vmov_n_s8", "arguments": [ - "int16x4_t a", - "int16x4_t v", - "const int lane" + "int8_t value" ], "return_type": { - "value": "int32x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "value": { + "register": "rn" } }, "Architectures": [ @@ -57565,31 +260038,22 @@ ], "instructions": [ [ - "SMULL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_lane_s32", + "name": "vmov_n_u16", "arguments": [ - "int32x2_t a", - "int32x2_t v", - "const int lane" + "uint16_t value" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "value": { + "register": "rn" } }, "Architectures": [ @@ -57599,31 +260063,22 @@ ], "instructions": [ [ - "SMULL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_lane_u16", + "name": "vmov_n_u32", "arguments": [ - "uint16x4_t a", - "uint16x4_t v", - "const int lane" + "uint32_t value" ], "return_type": { - "value": "uint32x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "value": { + "register": "rn" } }, "Architectures": [ @@ -57633,31 +260088,22 @@ ], "instructions": [ [ - "UMULL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_lane_u32", + "name": "vmov_n_u64", "arguments": [ - "uint32x2_t a", - "uint32x2_t v", - "const int lane" 
+ "uint64_t value" ], "return_type": { - "value": "uint64x2_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "value": { + "register": "rn" } }, "Architectures": [ @@ -57667,63 +260113,47 @@ ], "instructions": [ [ - "UMULL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_laneq_s16", + "name": "vmov_n_u8", "arguments": [ - "int16x4_t a", - "int16x8_t v", - "const int lane" + "uint8_t value" ], "return_type": { - "value": "int32x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMULL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_laneq_s32", + "name": "vmovl_high_s16", "arguments": [ - "int32x2_t a", - "int32x4_t v", - "const int lane" + "int16x8_t a" ], "return_type": { - "value": "int64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vn.8H" } }, "Architectures": [ @@ -57731,31 +260161,22 @@ ], "instructions": [ [ - "SMULL" + "SSHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_laneq_u16", + "name": "vmovl_high_s32", "arguments": [ - "uint16x4_t a", - "uint16x8_t v", - "const int lane" + "int32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vn.4S" } }, "Architectures": [ @@ -57763,31 +260184,22 @@ ], "instructions": [ [ - "UMULL" + "SSHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_laneq_u32", + "name": "vmovl_high_s8", "arguments": [ - "uint32x2_t a", - "uint32x4_t v", - "const int 
lane" + "int8x16_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vn.16B" } }, "Architectures": [ @@ -57795,113 +260207,91 @@ ], "instructions": [ [ - "UMULL" + "SSHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_n_s16", + "name": "vmovl_high_u16", "arguments": [ - "int16x4_t a", - "int16_t b" + "uint16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.H[0]" + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMULL" + "USHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_n_s32", + "name": "vmovl_high_u32", "arguments": [ - "int32x2_t a", - "int32_t b" + "uint32x4_t a" ], "return_type": { - "value": "int64x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.S[0]" + "register": "Vn.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMULL" + "USHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_n_u16", + "name": "vmovl_high_u8", "arguments": [ - "uint16x4_t a", - "uint16_t b" + "uint8x16_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.H[0]" + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMULL" + "USHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_n_u32", + "name": "vmovl_s16", "arguments": [ - "uint32x2_t a", - "uint32_t b" + "int16x4_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.S[0]" + "register": "Vn.4H" } }, "Architectures": [ @@ -57911,54 +260301,47 @@ 
], "instructions": [ [ - "UMULL" + "SSHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_p64", + "name": "vmovl_s32", "arguments": [ - "poly64_t a", - "poly64_t b" + "int32x2_t a" ], "return_type": { - "value": "poly128_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.1D" - }, - "b": { - "register": "Vm.1D" + "register": "Vn.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "PMULL" + "SSHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_p8", + "name": "vmovl_s8", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "int8x8_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" } }, "Architectures": [ @@ -57968,26 +260351,22 @@ ], "instructions": [ [ - "PMULL" + "SSHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_s16", + "name": "vmovl_u16", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "uint16x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" } }, "Architectures": [ @@ -57997,26 +260376,22 @@ ], "instructions": [ [ - "SMULL" + "USHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_s32", + "name": "vmovl_u32", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "uint32x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" } }, "Architectures": [ @@ -58026,26 +260401,22 @@ ], "instructions": [ [ - "SMULL" + "USHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_s8", + "name": "vmovl_u8", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint8x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" } }, "Architectures": [ @@ -58055,26 +260426,26 @@ ], "instructions": [ [ - 
"SMULL" + "USHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_u16", + "name": "vmovn_high_s16", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "int8x8_t r", + "int16x8_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.8H" }, - "b": { - "register": "Vm.4H" + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -58084,26 +260455,26 @@ ], "instructions": [ [ - "UMULL" + "XTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_u32", + "name": "vmovn_high_s32", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "int16x4_t r", + "int32x4_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4S" }, - "b": { - "register": "Vm.2S" + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -58113,26 +260484,26 @@ ], "instructions": [ [ - "UMULL" + "XTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_u8", + "name": "vmovn_high_s64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "int32x2_t r", + "int64x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2D" }, - "b": { - "register": "Vm.8B" + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -58142,54 +260513,55 @@ ], "instructions": [ [ - "UMULL" + "XTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_f16", + "name": "vmovn_high_u16", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "uint8x8_t r", + "uint16x8_t a" ], "return_type": { - "value": "float16x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" }, - "b": { - "register": "Vm.8H" + "r": { + "register": "Vd.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FMUL" + "XTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_f32", + "name": "vmovn_high_u32", "arguments": [ - "float32x4_t a", - "float32x4_t b" + 
"uint16x4_t r", + "uint32x4_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" }, - "b": { - "register": "Vm.4S" + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -58199,91 +260571,76 @@ ], "instructions": [ [ - "FMUL" + "XTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_f64", + "name": "vmovn_high_u64", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "uint32x2_t r", + "uint64x2_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2D" }, - "b": { - "register": "Vm.2D" + "r": { + "register": "Vd.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMUL" + "XTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_lane_f16", + "name": "vmovn_s16", "arguments": [ - "float16x8_t a", - "float16x4_t v", - "const int lane" + "int16x8_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FMUL" + "XTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_lane_f32", + "name": "vmovn_s32", "arguments": [ - "float32x4_t a", - "float32x2_t v", - "const int lane" + "int32x4_t a" ], "return_type": { - "value": "float32x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" } }, "Architectures": [ @@ -58293,63 +260650,47 @@ ], "instructions": [ [ - "FMUL" + "XTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_lane_f64", + "name": "vmovn_s64", "arguments": [ - "float64x2_t a", - "float64x1_t v", - "const int lane" + "int64x2_t a" ], "return_type": { - "value": "float64x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2D" - }, - "lane": 
{ - "minimum": 0, - "maximum": 0 - }, - "v": { - "register": "Vm.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMUL" + "XTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_lane_s16", + "name": "vmovn_u16", "arguments": [ - "int16x8_t a", - "int16x4_t v", - "const int lane" + "uint16x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" } }, "Architectures": [ @@ -58359,31 +260700,22 @@ ], "instructions": [ [ - "MUL" + "XTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_lane_s32", + "name": "vmovn_u32", "arguments": [ - "int32x4_t a", - "int32x2_t v", - "const int lane" + "uint32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" } }, "Architectures": [ @@ -58393,31 +260725,22 @@ ], "instructions": [ [ - "MUL" + "XTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_lane_u16", + "name": "vmovn_u64", "arguments": [ - "uint16x8_t a", - "uint16x4_t v", - "const int lane" + "uint64x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Vn.2D" } }, "Architectures": [ @@ -58427,31 +260750,22 @@ ], "instructions": [ [ - "MUL" + "XTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_lane_u32", + "name": "vmovq_n_f16", "arguments": [ - "uint32x4_t a", - "uint32x2_t v", - "const int lane" + "float16_t value" ], "return_type": { - "value": "uint32x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "value": { + "register": "rn" } }, 
"Architectures": [ @@ -58461,63 +260775,47 @@ ], "instructions": [ [ - "MUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_laneq_f16", + "name": "vmovq_n_f32", "arguments": [ - "float16x8_t a", - "float16x8_t v", - "const int lane" + "float32_t value" ], "return_type": { - "value": "float16x8_t" + "value": "float32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_laneq_f32", + "name": "vmovq_n_f64", "arguments": [ - "float32x4_t a", - "float32x4_t v", - "const int lane" + "float64_t value" ], "return_type": { - "value": "float32x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "value": { + "register": "rn" } }, "Architectures": [ @@ -58525,214 +260823,172 @@ ], "instructions": [ [ - "FMUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_laneq_f64", + "name": "vmovq_n_p16", "arguments": [ - "float64x2_t a", - "float64x2_t v", - "const int lane" + "poly16_t value" ], "return_type": { - "value": "float64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2D" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_laneq_s16", + "name": "vmovq_n_p8", "arguments": [ - "int16x8_t a", - "int16x8_t v", - "const int lane" + "poly8_t value" ], "return_type": { - "value": "int16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + 
"value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_laneq_s32", + "name": "vmovq_n_s16", "arguments": [ - "int32x4_t a", - "int32x4_t v", - "const int lane" + "int16_t value" ], "return_type": { - "value": "int32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_laneq_u16", + "name": "vmovq_n_s32", "arguments": [ - "uint16x8_t a", - "uint16x8_t v", - "const int lane" + "int32_t value" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_laneq_u32", + "name": "vmovq_n_s64", "arguments": [ - "uint32x4_t a", - "uint32x4_t v", - "const int lane" + "int64_t value" ], "return_type": { - "value": "uint32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_n_f16", + "name": "vmovq_n_s8", "arguments": [ - "float16x8_t a", - "float16_t n" + "int8_t value" ], "return_type": { - "value": "float16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "n": { - "register": "Vm.H[0]" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", "A32", 
"A64" ], "instructions": [ [ - "FMUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_n_f32", + "name": "vmovq_n_u16", "arguments": [ - "float32x4_t a", - "float32_t b" + "uint16_t value" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.S[0]" + "value": { + "register": "rn" } }, "Architectures": [ @@ -58742,53 +260998,47 @@ ], "instructions": [ [ - "FMUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_n_f64", + "name": "vmovq_n_u32", "arguments": [ - "float64x2_t a", - "float64_t b" + "uint32_t value" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.D[0]" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_n_s16", + "name": "vmovq_n_u64", "arguments": [ - "int16x8_t a", - "int16_t b" + "uint64_t value" ], "return_type": { - "value": "int16x8_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.H[0]" + "value": { + "register": "rn" } }, "Architectures": [ @@ -58798,26 +261048,22 @@ ], "instructions": [ [ - "MUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_n_s32", + "name": "vmovq_n_u8", "arguments": [ - "int32x4_t a", - "int32_t b" + "uint8_t value" ], "return_type": { - "value": "int32x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.S[0]" + "value": { + "register": "rn" } }, "Architectures": [ @@ -58827,55 +261073,54 @@ ], "instructions": [ [ - "MUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_n_u16", + "name": "vmul_f16", "arguments": [ - "uint16x8_t a", - "uint16_t b" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": 
"float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, "b": { - "register": "Vm.H[0]" + "register": "Vm.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "MUL" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_n_u32", + "name": "vmul_f32", "arguments": [ - "uint32x4_t a", - "uint32_t b" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" }, "b": { - "register": "Vm.S[0]" + "register": "Vm.2S" } }, "Architectures": [ @@ -58885,84 +261130,91 @@ ], "instructions": [ [ - "MUL" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_p8", + "name": "vmul_f64", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Dn" }, "b": { - "register": "Vm.16B" + "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "PMUL" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_s16", + "name": "vmul_lane_f16", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "float16x4_t a", + "float16x4_t v", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, - "b": { - "register": "Vm.8H" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "MUL" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_s32", + "name": "vmul_lane_f32", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "float32x2_t a", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": 
"Vn.2S" }, - "b": { - "register": "Vm.4S" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -58972,55 +261224,63 @@ ], "instructions": [ [ - "MUL" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_s8", + "name": "vmul_lane_f64", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "float64x1_t a", + "float64x1_t v", + "const int lane" ], "return_type": { - "value": "int8x16_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Dn" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vm.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MUL" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_u16", + "name": "vmul_lane_s16", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "int16x4_t a", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, - "b": { - "register": "Vm.8H" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -59036,20 +261296,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vmulq_u32", + "name": "vmul_lane_s32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int32x2_t a", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" }, - "b": { - "register": "Vm.4S" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -59065,20 +261330,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vmulq_u8", + "name": "vmul_lane_u16", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "uint16x4_t a", + "uint16x4_t v", + "const int lane" ], "return_type": { - "value": "uint8x16_t" + "value": "uint16x4_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4H" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -59094,18 +261364,18 @@ }, { "SIMD_ISA": "Neon", - "name": "vmuls_lane_f32", + "name": "vmul_lane_u32", "arguments": [ - "float32_t a", - "float32x2_t v", + "uint32x2_t a", + "uint32x2_t v", "const int lane" ], "return_type": { - "value": "float32_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vn.2S" }, "lane": { "minimum": 0, @@ -59116,35 +261386,37 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMUL" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmuls_laneq_f32", + "name": "vmul_laneq_f16", "arguments": [ - "float32_t a", - "float32x4_t v", + "float16x4_t a", + "float16x8_t v", "const int lane" ], "return_type": { - "value": "float32_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vn.4H" }, "lane": { "minimum": 0, - "maximum": 3 + "maximum": 7 }, "v": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ @@ -59158,20 +261430,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vmulx_f16", + "name": "vmul_laneq_f32", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "float32x2_t a", + "float32x4_t v", + "const int lane" ], "return_type": { - "value": "float16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.4H" + "v": { + "register": "Vm.4S" } }, "Architectures": [ @@ -59179,26 +261456,31 @@ ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulx_f32", + "name": "vmul_laneq_f64", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "float64x1_t a", + "float64x2_t v", + "const int lane" ], "return_type": { - "value": "float32x2_t" + 
"value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Dn" }, - "b": { - "register": "Vm.2S" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2D" } }, "Architectures": [ @@ -59206,26 +261488,31 @@ ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulx_f64", + "name": "vmul_laneq_s16", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "int16x4_t a", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "float64x1_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.4H" }, - "b": { - "register": "Dm" + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -59233,31 +261520,31 @@ ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulx_lane_f16", + "name": "vmul_laneq_s32", "arguments": [ - "float16x4_t a", - "float16x4_t v", + "int32x2_t a", + "int32x4_t v", "const int lane" ], "return_type": { - "value": "float16x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.2S" }, "lane": { "minimum": 0, "maximum": 3 }, "v": { - "register": "Vm.4H" + "register": "Vm.4S" } }, "Architectures": [ @@ -59265,31 +261552,31 @@ ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulx_lane_f32", + "name": "vmul_laneq_u16", "arguments": [ - "float32x2_t a", - "float32x2_t v", + "uint16x4_t a", + "uint16x8_t v", "const int lane" ], "return_type": { - "value": "float32x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4H" }, "lane": { "minimum": 0, - "maximum": 1 + "maximum": 7 }, "v": { - "register": "Vm.2S" + "register": "Vm.8H" } }, "Architectures": [ @@ -59297,31 +261584,31 @@ ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulx_lane_f64", + "name": "vmul_laneq_u32", 
"arguments": [ - "float64x1_t a", - "float64x1_t v", + "uint32x2_t a", + "uint32x4_t v", "const int lane" ], "return_type": { - "value": "float64x1_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.2S" }, "lane": { "minimum": 0, - "maximum": 0 + "maximum": 3 }, "v": { - "register": "Vm.1D" + "register": "Vm.4S" } }, "Architectures": [ @@ -59329,17 +261616,16 @@ ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulx_laneq_f16", + "name": "vmul_n_f16", "arguments": [ "float16x4_t a", - "float16x8_t v", - "const int lane" + "float16_t n" ], "return_type": { "value": "float16x4_t" @@ -59348,30 +261634,26 @@ "a": { "register": "Vn.4H" }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "n": { + "register": "Vm.H[0]" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulx_laneq_f32", + "name": "vmul_n_f32", "arguments": [ "float32x2_t a", - "float32x4_t v", - "const int lane" + "float32_t b" ], "return_type": { "value": "float32x2_t" @@ -59380,30 +261662,27 @@ "a": { "register": "Vn.2S" }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "b": { + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulx_laneq_f64", + "name": "vmul_n_f64", "arguments": [ "float64x1_t a", - "float64x2_t v", - "const int lane" + "float64_t b" ], "return_type": { "value": "float64x1_t" @@ -59412,12 +261691,8 @@ "a": { "register": "Dn" }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2D" + "b": { + "register": "Vm.D[0]" } }, "Architectures": [ @@ -59425,353 +261700,350 @@ ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulx_n_f16", + "name": "vmul_n_s16", "arguments": [ - "float16x4_t a", - "float16_t n" + "int16x4_t 
a", + "int16_t b" ], "return_type": { - "value": "float16x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4H" }, - "n": { + "b": { "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxd_f64", + "name": "vmul_n_s32", "arguments": [ - "float64_t a", - "float64_t b" + "int32x2_t a", + "int32_t b" ], "return_type": { - "value": "float64_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.2S" }, "b": { - "register": "Dm" + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxd_lane_f64", + "name": "vmul_n_u16", "arguments": [ - "float64_t a", - "float64x1_t v", - "const int lane" + "uint16x4_t a", + "uint16_t b" ], "return_type": { - "value": "float64_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "lane": { - "minimum": 0, - "maximum": 0 + "register": "Vn.4H" }, - "v": { - "register": "Vm.1D" + "b": { + "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxd_laneq_f64", + "name": "vmul_n_u32", "arguments": [ - "float64_t a", - "float64x2_t v", - "const int lane" + "uint32x2_t a", + "uint32_t b" ], "return_type": { - "value": "float64_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Vn.2S" }, - "v": { - "register": "Vm.2D" + "b": { + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxh_f16", + "name": "vmul_p8", "arguments": [ - "float16_t a", - "float16_t b" + "poly8x8_t a", + "poly8x8_t b" ], "return_type": { - "value": "float16_t" + "value": 
"poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.8B" }, "b": { - "register": "Hm" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "PMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxh_lane_f16", + "name": "vmul_s16", "arguments": [ - "float16_t a", - "float16x4_t v", - "const int lane" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "float16_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.4H" }, - "v": { + "b": { "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxh_laneq_f16", + "name": "vmul_s32", "arguments": [ - "float16_t a", - "float16x8_t v", - "const int lane" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "float16_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Vn.2S" }, - "v": { - "register": "Vm.8H" + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_f16", + "name": "vmul_s8", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm.8H" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_f32", + "name": "vmul_u16", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" 
+ "register": "Vn.4H" }, "b": { - "register": "Vm.4S" + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_f64", + "name": "vmul_u32", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.2S" }, "b": { - "register": "Vm.2D" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_lane_f16", + "name": "vmul_u8", "arguments": [ - "float16x8_t a", - "float16x4_t v", - "const int lane" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "float16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.8B" }, - "v": { - "register": "Vm.4H" + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_lane_f32", + "name": "vmuld_lane_f64", "arguments": [ - "float32x4_t a", - "float32x2_t v", + "float64_t a", + "float64x1_t v", "const int lane" ], "return_type": { - "value": "float32x4_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Dn" }, "lane": { "minimum": 0, - "maximum": 1 + "maximum": 0 }, "v": { - "register": "Vm.2S" + "register": "Vm.1D" } }, "Architectures": [ @@ -59779,31 +262051,31 @@ ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_lane_f64", + "name": "vmuld_laneq_f64", "arguments": [ - "float64x2_t a", - "float64x1_t v", + "float64_t a", + "float64x2_t v", "const int lane" ], "return_type": { - "value": "float64x2_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - 
"register": "Vn.2D" + "register": "Dn" }, "lane": { "minimum": 0, - "maximum": 0 + "maximum": 1 }, "v": { - "register": "Vm.1D" + "register": "Vm.2D" } }, "Architectures": [ @@ -59811,63 +262083,59 @@ ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_laneq_f16", + "name": "vmulh_f16", "arguments": [ - "float16x8_t a", - "float16x8_t v", - "const int lane" + "float16_t a", + "float16_t b" ], "return_type": { - "value": "float16x8_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Hn" }, - "v": { - "register": "Vm.8H" + "b": { + "register": "Hm" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_laneq_f32", + "name": "vmulh_lane_f16", "arguments": [ - "float32x4_t a", - "float32x4_t v", + "float16_t a", + "float16x4_t v", "const int lane" ], "return_type": { - "value": "float32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Hn" }, "lane": { "minimum": 0, "maximum": 3 }, "v": { - "register": "Vm.4S" + "register": "Vm.4H" } }, "Architectures": [ @@ -59875,31 +262143,31 @@ ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_laneq_f64", + "name": "vmulh_laneq_f16", "arguments": [ - "float64x2_t a", - "float64x2_t v", + "float16_t a", + "float16x8_t v", "const int lane" ], "return_type": { - "value": "float64x2_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Hn" }, "lane": { "minimum": 0, - "maximum": 1 + "maximum": 7 }, "v": { - "register": "Vm.2D" + "register": "Vm.8H" } }, "Architectures": [ @@ -59907,26 +262175,31 @@ ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_n_f16", + "name": "vmull_high_lane_s16", "arguments": [ - "float16x8_t a", - "float16_t n" + "int16x8_t a", + 
"int16x4_t v", + "const int lane" ], "return_type": { - "value": "float16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" }, - "n": { - "register": "Vm.H[0]" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -59934,26 +262207,31 @@ ], "instructions": [ [ - "FMULX" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxs_f32", + "name": "vmull_high_lane_s32", "arguments": [ - "float32_t a", - "float32_t b" + "int32x4_t a", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "float32_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vn.4S" }, - "b": { - "register": "Sm" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -59961,31 +262239,31 @@ ], "instructions": [ [ - "FMULX" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxs_lane_f32", + "name": "vmull_high_lane_u16", "arguments": [ - "float32_t a", - "float32x2_t v", + "uint16x8_t a", + "uint16x4_t v", "const int lane" ], "return_type": { - "value": "float32_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vn.8H" }, "lane": { "minimum": 0, - "maximum": 1 + "maximum": 3 }, "v": { - "register": "Vm.2S" + "register": "Vm.4H" } }, "Architectures": [ @@ -59993,31 +262271,31 @@ ], "instructions": [ [ - "FMULX" + "UMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxs_laneq_f32", + "name": "vmull_high_lane_u32", "arguments": [ - "float32_t a", - "float32x4_t v", + "uint32x4_t a", + "uint32x2_t v", "const int lane" ], "return_type": { - "value": "float32_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vn.4S" }, "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, "v": { - "register": "Vm.4S" + "register": "Vm.2S" } }, "Architectures": [ @@ -60025,444 +262303,518 @@ ], "instructions": [ [ - "FMULX" + "UMULL2" 
] ] }, { "SIMD_ISA": "Neon", - "name": "vmvn_p8", + "name": "vmull_high_laneq_s16", "arguments": [ - "poly8x8_t a" + "int16x8_t a", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "poly8x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvn_s16", + "name": "vmull_high_laneq_s32", "arguments": [ - "int16x4_t a" + "int32x4_t a", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvn_s32", + "name": "vmull_high_laneq_u16", "arguments": [ - "int32x2_t a" + "uint16x8_t a", + "uint16x8_t v", + "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "UMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvn_s8", + "name": "vmull_high_laneq_u32", "arguments": [ - "int8x8_t a" + "uint32x4_t a", + "uint32x4_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "UMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vmvn_u16", + "name": "vmull_high_n_s16", "arguments": [ - "uint16x4_t a" + "int16x8_t a", + "int16_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvn_u32", + "name": "vmull_high_n_s32", "arguments": [ - "uint32x2_t a" + "int32x4_t a", + "int32_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvn_u8", + "name": "vmull_high_n_u16", "arguments": [ - "uint8x8_t a" + "uint16x8_t a", + "uint16_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "UMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvnq_p8", + "name": "vmull_high_n_u32", "arguments": [ - "poly8x16_t a" + "uint32x4_t a", + "uint32_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "UMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvnq_s16", + "name": "vmull_high_p64", "arguments": [ - "int16x8_t a" + "poly64x2_t a", + "poly64x2_t b" ], "return_type": { - "value": "int16x8_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ - 
"v7", "A32", "A64" ], "instructions": [ [ - "MVN" + "PMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvnq_s32", + "name": "vmull_high_p8", "arguments": [ - "int32x4_t a" + "poly8x16_t a", + "poly8x16_t b" ], "return_type": { - "value": "int32x4_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "PMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvnq_s8", + "name": "vmull_high_s16", "arguments": [ - "int8x16_t a" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvnq_u16", + "name": "vmull_high_s32", "arguments": [ - "uint16x8_t a" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvnq_u32", + "name": "vmull_high_s8", "arguments": [ - "uint32x4_t a" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvnq_u8", + "name": "vmull_high_u16", "arguments": [ - "uint8x16_t a" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" + }, + "b": { + 
"register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "UMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vneg_f16", + "name": "vmull_high_u32", "arguments": [ - "float16x4_t a" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "float16x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FNEG" + "UMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vneg_f32", + "name": "vmull_high_u8", "arguments": [ - "float32x2_t a" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "float32x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FNEG" + "UMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vneg_f64", + "name": "vmull_lane_s16", "arguments": [ - "float64x1_t a" + "int16x4_t a", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "float64x1_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FNEG" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vneg_s16", + "name": "vmull_lane_s32", "arguments": [ - "int16x4_t a" + "int32x2_t a", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -60472,22 +262824,31 @@ ], "instructions": [ [ - "NEG" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vneg_s32", + "name": "vmull_lane_u16", 
"arguments": [ - "int32x2_t a" + "uint16x4_t a", + "uint16x4_t v", + "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -60497,70 +262858,97 @@ ], "instructions": [ [ - "NEG" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vneg_s64", + "name": "vmull_lane_u32", "arguments": [ - "int64x1_t a" + "uint32x2_t a", + "uint32x2_t v", + "const int lane" ], "return_type": { - "value": "int64x1_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "NEG" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vneg_s8", + "name": "vmull_laneq_s16", "arguments": [ - "int8x8_t a" + "int16x4_t a", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NEG" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegd_s64", + "name": "vmull_laneq_s32", "arguments": [ - "int64_t a" + "int32x2_t a", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "int64_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ @@ -60568,70 +262956,90 @@ ], "instructions": [ [ - "NEG" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegh_f16", + "name": "vmull_laneq_u16", "arguments": [ - "float16_t a" + "uint16x4_t a", + "uint16x8_t 
v", + "const int lane" ], "return_type": { - "value": "float16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FNEG" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegq_f16", + "name": "vmull_laneq_u32", "arguments": [ - "float16x8_t a" + "uint32x2_t a", + "uint32x4_t v", + "const int lane" ], "return_type": { - "value": "float16x8_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FNEG" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegq_f32", + "name": "vmull_n_s16", "arguments": [ - "float32x4_t a" + "int16x4_t a", + "int16_t b" ], "return_type": { - "value": "float32x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H[0]" } }, "Architectures": [ @@ -60641,45 +263049,55 @@ ], "instructions": [ [ - "FNEG" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegq_f64", + "name": "vmull_n_s32", "arguments": [ - "float64x2_t a" + "int32x2_t a", + "int32_t b" ], "return_type": { - "value": "float64x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FNEG" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegq_s16", + "name": "vmull_n_u16", "arguments": [ - "int16x8_t a" + "uint16x4_t a", + "uint16_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H[0]" } }, 
"Architectures": [ @@ -60689,22 +263107,26 @@ ], "instructions": [ [ - "NEG" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegq_s32", + "name": "vmull_n_u32", "arguments": [ - "int32x4_t a" + "uint32x2_t a", + "uint32_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S[0]" } }, "Architectures": [ @@ -60714,45 +263136,54 @@ ], "instructions": [ [ - "NEG" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegq_s64", + "name": "vmull_p64", "arguments": [ - "int64x2_t a" + "poly64_t a", + "poly64_t b" ], "return_type": { - "value": "int64x2_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.1D" + }, + "b": { + "register": "Vm.1D" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "NEG" + "PMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegq_s8", + "name": "vmull_p8", "arguments": [ - "int8x16_t a" + "poly8x8_t a", + "poly8x8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" } }, "Architectures": [ @@ -60762,26 +263193,26 @@ ], "instructions": [ [ - "NEG" + "PMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorn_s16", + "name": "vmull_s16", "arguments": [ "int16x4_t a", "int16x4_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4H" }, "b": { - "register": "Vm.8B" + "register": "Vm.4H" } }, "Architectures": [ @@ -60791,26 +263222,26 @@ ], "instructions": [ [ - "ORN" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorn_s32", + "name": "vmull_s32", "arguments": [ "int32x2_t a", "int32x2_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2S" }, 
"b": { - "register": "Vm.8B" + "register": "Vm.2S" } }, "Architectures": [ @@ -60820,19 +263251,19 @@ ], "instructions": [ [ - "ORN" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorn_s64", + "name": "vmull_s8", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int64x1_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -60849,26 +263280,26 @@ ], "instructions": [ [ - "ORN" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorn_s8", + "name": "vmull_u16", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "int8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4H" }, "b": { - "register": "Vm.8B" + "register": "Vm.4H" } }, "Architectures": [ @@ -60878,26 +263309,26 @@ ], "instructions": [ [ - "ORN" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorn_u16", + "name": "vmull_u32", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2S" }, "b": { - "register": "Vm.8B" + "register": "Vm.2S" } }, "Architectures": [ @@ -60907,19 +263338,19 @@ ], "instructions": [ [ - "ORN" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorn_u32", + "name": "vmull_u8", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { @@ -60936,55 +263367,54 @@ ], "instructions": [ [ - "ORN" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorn_u64", + "name": "vmulq_f16", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "uint64x1_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": 
"Vn.8H" }, "b": { - "register": "Vm.8B" + "register": "Vm.8H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ORN" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorn_u8", + "name": "vmulq_f32", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" }, "b": { - "register": "Vm.8B" + "register": "Vm.4S" } }, "Architectures": [ @@ -60994,84 +263424,91 @@ ], "instructions": [ [ - "ORN" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vornq_s16", + "name": "vmulq_f64", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2D" }, "b": { - "register": "Vm.16B" + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORN" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vornq_s32", + "name": "vmulq_lane_f16", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "float16x8_t a", + "float16x4_t v", + "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ORN" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vornq_s64", + "name": "vmulq_lane_f32", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "float32x4_t a", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "int64x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 1 + 
}, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -61081,55 +263518,63 @@ ], "instructions": [ [ - "ORN" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vornq_s8", + "name": "vmulq_lane_f64", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "float64x2_t a", + "float64x1_t v", + "const int lane" ], "return_type": { - "value": "int8x16_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2D" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vm.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORN" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vornq_u16", + "name": "vmulq_lane_s16", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "int16x8_t a", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -61139,26 +263584,31 @@ ], "instructions": [ [ - "ORN" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vornq_u32", + "name": "vmulq_lane_s32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int32x4_t a", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -61168,26 +263618,31 @@ ], "instructions": [ [ - "ORN" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vornq_u64", + "name": "vmulq_lane_u16", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "uint16x8_t a", + "uint16x4_t v", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "uint16x8_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -61197,26 +263652,31 @@ ], "instructions": [ [ - "ORN" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vornq_u8", + "name": "vmulq_lane_u32", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "uint32x4_t a", + "uint32x2_t v", + "const int lane" ], "return_type": { - "value": "uint8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -61226,258 +263686,278 @@ ], "instructions": [ [ - "ORN" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorr_s16", + "name": "vmulq_laneq_f16", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "float16x8_t a", + "float16x8_t v", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORR" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorr_s32", + "name": "vmulq_laneq_f32", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "float32x4_t a", + "float32x4_t v", + "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORR" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorr_s64", + "name": "vmulq_laneq_f64", "arguments": [ - "int64x1_t 
a", - "int64x1_t b" + "float64x2_t a", + "float64x2_t v", + "const int lane" ], "return_type": { - "value": "int64x1_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2D" }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORR" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorr_s8", + "name": "vmulq_laneq_s16", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "int16x8_t a", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorr_u16", + "name": "vmulq_laneq_s32", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "int32x4_t a", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorr_u32", + "name": "vmulq_laneq_u16", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "uint16x8_t a", + "uint16x8_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - 
"ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorr_u64", + "name": "vmulq_laneq_u32", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "uint32x4_t a", + "uint32x4_t v", + "const int lane" ], "return_type": { - "value": "uint64x1_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorr_u8", + "name": "vmulq_n_f16", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "float16x8_t a", + "float16_t n" ], "return_type": { - "value": "uint8x8_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, - "b": { - "register": "Vm.8B" + "n": { + "register": "Vm.H[0]" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ORR" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorrq_s16", + "name": "vmulq_n_f32", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "float32x4_t a", + "float32_t b" ], "return_type": { - "value": "int16x8_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, "b": { - "register": "Vm.16B" + "register": "Vm.S[0]" } }, "Architectures": [ @@ -61487,55 +263967,53 @@ ], "instructions": [ [ - "ORR" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorrq_s32", + "name": "vmulq_n_f64", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "float64x2_t a", + "float64_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2D" }, "b": { - "register": "Vm.16B" + "register": "Vm.D[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORR" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorrq_s64", + "name": "vmulq_n_s16", 
"arguments": [ - "int64x2_t a", - "int64x2_t b" + "int16x8_t a", + "int16_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, "b": { - "register": "Vm.16B" + "register": "Vm.H[0]" } }, "Architectures": [ @@ -61545,26 +264023,26 @@ ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorrq_s8", + "name": "vmulq_n_s32", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "int32x4_t a", + "int32_t b" ], "return_type": { - "value": "int8x16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, "b": { - "register": "Vm.16B" + "register": "Vm.S[0]" } }, "Architectures": [ @@ -61574,26 +264052,26 @@ ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorrq_u16", + "name": "vmulq_n_u16", "arguments": [ "uint16x8_t a", - "uint16x8_t b" + "uint16_t b" ], "return_type": { "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, "b": { - "register": "Vm.16B" + "register": "Vm.H[0]" } }, "Architectures": [ @@ -61603,26 +264081,26 @@ ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorrq_u32", + "name": "vmulq_n_u32", "arguments": [ "uint32x4_t a", - "uint32x4_t b" + "uint32_t b" ], "return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, "b": { - "register": "Vm.16B" + "register": "Vm.S[0]" } }, "Architectures": [ @@ -61632,19 +264110,19 @@ ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorrq_u64", + "name": "vmulq_p8", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "poly8x16_t a", + "poly8x16_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -61661,26 +264139,26 @@ ], "instructions": [ [ - "ORR" + "PMUL" ] ] }, { "SIMD_ISA": 
"Neon", - "name": "vorrq_u8", + "name": "vmulq_s16", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, "b": { - "register": "Vm.16B" + "register": "Vm.8H" } }, "Architectures": [ @@ -61690,26 +264168,26 @@ ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadal_s16", + "name": "vmulq_s32", "arguments": [ - "int32x2_t a", - "int16x4_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.4S" }, "b": { - "register": "Vn.4H" + "register": "Vm.4S" } }, "Architectures": [ @@ -61719,26 +264197,26 @@ ], "instructions": [ [ - "SADALP" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadal_s32", + "name": "vmulq_s8", "arguments": [ - "int64x1_t a", - "int32x2_t b" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int64x1_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.16B" }, "b": { - "register": "Vn.2S" + "register": "Vm.16B" } }, "Architectures": [ @@ -61748,26 +264226,26 @@ ], "instructions": [ [ - "SADALP" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadal_s8", + "name": "vmulq_u16", "arguments": [ - "int16x4_t a", - "int8x8_t b" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int16x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.8H" }, "b": { - "register": "Vn.8B" + "register": "Vm.8H" } }, "Architectures": [ @@ -61777,26 +264255,26 @@ ], "instructions": [ [ - "SADALP" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadal_u16", + "name": "vmulq_u32", "arguments": [ - "uint32x2_t a", - "uint16x4_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "uint32x2_t" + 
"value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.4S" }, "b": { - "register": "Vn.4H" + "register": "Vm.4S" } }, "Architectures": [ @@ -61806,26 +264284,26 @@ ], "instructions": [ [ - "UADALP" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadal_u32", + "name": "vmulq_u8", "arguments": [ - "uint64x1_t a", - "uint32x2_t b" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "uint64x1_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.16B" }, "b": { - "register": "Vn.2S" + "register": "Vm.16B" } }, "Architectures": [ @@ -61835,456 +264313,481 @@ ], "instructions": [ [ - "UADALP" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadal_u8", + "name": "vmuls_lane_f32", "arguments": [ - "uint16x4_t a", - "uint8x8_t b" + "float32_t a", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Sn" }, - "b": { - "register": "Vn.8B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADALP" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadalq_s16", + "name": "vmuls_laneq_f32", "arguments": [ - "int32x4_t a", - "int16x8_t b" + "float32_t a", + "float32x4_t v", + "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Sn" }, - "b": { - "register": "Vn.8H" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADALP" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadalq_s32", + "name": "vmulx_f16", "arguments": [ - "int64x2_t a", - "int32x4_t b" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "int64x2_t" + "value": 
"float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.4H" }, "b": { - "register": "Vn.4S" + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADALP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadalq_s8", + "name": "vmulx_f32", "arguments": [ - "int16x8_t a", - "int8x16_t b" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int16x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vn.2S" }, "b": { - "register": "Vn.16B" + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADALP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadalq_u16", + "name": "vmulx_f64", "arguments": [ - "uint32x4_t a", - "uint16x8_t b" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Dn" }, "b": { - "register": "Vn.8H" + "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADALP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadalq_u32", + "name": "vmulx_lane_f16", "arguments": [ - "uint64x2_t a", - "uint32x4_t b" + "float16x4_t a", + "float16x4_t v", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.4H" }, - "b": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADALP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadalq_u8", + "name": "vmulx_lane_f32", "arguments": [ - "uint16x8_t a", - "uint8x16_t b" + "float32x2_t a", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": 
"Vd.8H" + "register": "Vn.2S" }, - "b": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADALP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadd_f16", + "name": "vmulx_lane_f64", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "float64x1_t a", + "float64x1_t v", + "const int lane" ], "return_type": { - "value": "float16x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Dn" }, - "b": { - "register": "Vm.4H" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vm.1D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadd_f32", + "name": "vmulx_laneq_f16", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "float16x4_t a", + "float16x8_t v", + "const int lane" ], "return_type": { - "value": "float32x2_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4H" }, - "b": { - "register": "Vm.2S" + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadd_s16", + "name": "vmulx_laneq_f32", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "float32x2_t a", + "float32x4_t v", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.2S" }, - "b": { - "register": "Vm.4H" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadd_s32", + "name": "vmulx_laneq_f64", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "float64x1_t a", + 
"float64x2_t v", + "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Dn" }, - "b": { - "register": "Vm.2S" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadd_s8", + "name": "vmulx_n_f16", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "float16x4_t a", + "float16_t n" ], "return_type": { - "value": "int8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4H" }, - "b": { - "register": "Vm.8B" + "n": { + "register": "Vm.H[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadd_u16", + "name": "vmulxd_f64", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "float64_t a", + "float64_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Dn" }, "b": { - "register": "Vm.4H" + "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadd_u32", + "name": "vmulxd_lane_f64", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "float64_t a", + "float64x1_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Dn" }, - "b": { - "register": "Vm.2S" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vm.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadd_u8", + "name": "vmulxd_laneq_f64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "float64_t a", + "float64x2_t v", + "const int 
lane" ], "return_type": { - "value": "uint8x8_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Dn" }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddd_f64", + "name": "vmulxh_f16", "arguments": [ - "float64x2_t a" + "float16_t a", + "float16_t b" ], "return_type": { - "value": "float64_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Hn" + }, + "b": { + "register": "Hm" } }, "Architectures": [ @@ -62292,22 +264795,31 @@ ], "instructions": [ [ - "FADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddd_s64", + "name": "vmulxh_lane_f16", "arguments": [ - "int64x2_t a" + "float16_t a", + "float16x4_t v", + "const int lane" ], "return_type": { - "value": "int64_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Hn" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -62315,22 +264827,31 @@ ], "instructions": [ [ - "ADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddd_u64", + "name": "vmulxh_laneq_f16", "arguments": [ - "uint64x2_t a" + "float16_t a", + "float16x8_t v", + "const int lane" ], "return_type": { - "value": "uint64_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Hn" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -62338,326 +264859,390 @@ ], "instructions": [ [ - "ADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddl_s16", + "name": "vmulxq_f16", "arguments": [ - "int16x4_t a" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "int32x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - 
"register": "Vn.4H" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddl_s32", + "name": "vmulxq_f32", "arguments": [ - "int32x2_t a" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int64x1_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddl_s8", + "name": "vmulxq_f64", "arguments": [ - "int8x8_t a" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int16x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddl_u16", + "name": "vmulxq_lane_f16", "arguments": [ - "uint16x4_t a" + "float16x8_t a", + "float16x4_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddl_u32", + "name": "vmulxq_lane_f32", "arguments": [ - "uint32x2_t a" + "float32x4_t a", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "uint64x1_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDLP" + 
"FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddl_u8", + "name": "vmulxq_lane_f64", "arguments": [ - "uint8x8_t a" + "float64x2_t a", + "float64x1_t v", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2D" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vm.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddlq_s16", + "name": "vmulxq_laneq_f16", "arguments": [ - "int16x8_t a" + "float16x8_t a", + "float16x8_t v", + "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddlq_s32", + "name": "vmulxq_laneq_f32", "arguments": [ - "int32x4_t a" + "float32x4_t a", + "float32x4_t v", + "const int lane" ], "return_type": { - "value": "int64x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddlq_s8", + "name": "vmulxq_laneq_f64", "arguments": [ - "int8x16_t a" + "float64x2_t a", + "float64x2_t v", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2D" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddlq_u16", + 
"name": "vmulxq_n_f16", "arguments": [ - "uint16x8_t a" + "float16x8_t a", + "float16_t n" ], "return_type": { - "value": "uint32x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" + }, + "n": { + "register": "Vm.H[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddlq_u32", + "name": "vmulxs_f32", "arguments": [ - "uint32x4_t a" + "float32_t a", + "float32_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Sn" + }, + "b": { + "register": "Sm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddlq_u8", + "name": "vmulxs_lane_f32", "arguments": [ - "uint8x16_t a" + "float32_t a", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Sn" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_f16", + "name": "vmulxs_laneq_f32", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "float32_t a", + "float32x4_t v", + "const int lane" ], "return_type": { - "value": "float16x8_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Sn" }, - "b": { - "register": "Vm.8H" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ @@ -62665,347 +265250,322 @@ ], "instructions": [ [ - "FADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_f32", + "name": "vmvn_p8", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "poly8x8_t a" ], "return_type": { - "value": "float32x4_t" + "value": 
"poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_f64", + "name": "vmvn_s16", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "int16x4_t a" ], "return_type": { - "value": "float64x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_s16", + "name": "vmvn_s32", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int32x2_t a" ], "return_type": { - "value": "int16x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_s32", + "name": "vmvn_s8", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int8x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_s64", + "name": "vmvn_u16", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "uint16x4_t a" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_s8", + "name": "vmvn_u32", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "uint32x2_t a" 
], "return_type": { - "value": "int8x16_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_u16", + "name": "vmvn_u8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "uint8x8_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_u32", + "name": "vmvnq_p8", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "poly8x16_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_u64", + "name": "vmvnq_s16", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "int16x8_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_u8", + "name": "vmvnq_s32", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "int32x4_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadds_f32", + "name": "vmvnq_s8", "arguments": [ - 
"float32x2_t a" + "int8x16_t a" ], "return_type": { - "value": "float32_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmax_f16", + "name": "vmvnq_u16", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "uint16x8_t a" ], "return_type": { - "value": "float16x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "register": "Vn.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FMAXP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmax_f32", + "name": "vmvnq_u32", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "uint32x4_t a" ], "return_type": { - "value": "float32x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" + "register": "Vn.16B" } }, "Architectures": [ @@ -63015,26 +265575,22 @@ ], "instructions": [ [ - "FMAXP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmax_s16", + "name": "vmvnq_u8", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "uint8x16_t a" ], "return_type": { - "value": "int16x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "register": "Vn.16B" } }, "Architectures": [ @@ -63044,55 +265600,46 @@ ], "instructions": [ [ - "SMAXP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmax_s32", + "name": "vneg_f16", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "float16x4_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" + "register": "Vn.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SMAXP" + "FNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmax_s8", + "name": 
"vneg_f32", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "float32x2_t a" ], "return_type": { - "value": "int8x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "register": "Vn.2S" } }, "Architectures": [ @@ -63102,55 +265649,45 @@ ], "instructions": [ [ - "SMAXP" + "FNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmax_u16", + "name": "vneg_f64", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "float64x1_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMAXP" + "FNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmax_u32", + "name": "vneg_s16", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "int16x4_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" + "register": "Vn.4H" } }, "Architectures": [ @@ -63160,26 +265697,22 @@ ], "instructions": [ [ - "UMAXP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmax_u8", + "name": "vneg_s32", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "int32x2_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "register": "Vn.2S" } }, "Architectures": [ @@ -63189,26 +265722,22 @@ ], "instructions": [ [ - "UMAXP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxnm_f16", + "name": "vneg_s64", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "int64x1_t a" ], "return_type": { - "value": "float16x4_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "register": "Dn" } }, "Architectures": [ @@ -63216,53 +265745,47 @@ ], "instructions": [ [ - 
"FMAXNMP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxnm_f32", + "name": "vneg_s8", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "int8x8_t a" ], "return_type": { - "value": "float32x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAXNMP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxnmq_f16", + "name": "vnegd_s64", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "int64_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Dn" } }, "Architectures": [ @@ -63270,99 +265793,95 @@ ], "instructions": [ [ - "FMAXNMP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxnmq_f32", + "name": "vnegh_f16", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "float16_t a" ], "return_type": { - "value": "float32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Hn" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FMAXNMP" + "FNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxnmq_f64", + "name": "vnegq_f16", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "float16x8_t a" ], "return_type": { - "value": "float64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn.8H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FMAXNMP" + "FNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxnmqd_f64", + "name": "vnegq_f32", "arguments": [ - "float64x2_t a" + "float32x4_t a" ], "return_type": { - "value": "float64_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4S" } }, "Architectures": [ + "v7", + 
"A32", "A64" ], "instructions": [ [ - "FMAXNMP" + "FNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxnms_f32", + "name": "vnegq_f64", "arguments": [ - "float32x2_t a" + "float64x2_t a" ], "return_type": { - "value": "float32_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.2D" } }, "Architectures": [ @@ -63370,80 +265889,72 @@ ], "instructions": [ [ - "FMAXNMP" + "FNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_f16", + "name": "vnegq_s16", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "int16x8_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAXP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_f32", + "name": "vnegq_s32", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "int32x4_t a" ], "return_type": { - "value": "float32x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAXP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_f64", + "name": "vnegq_s64", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "int64x2_t a" ], "return_type": { - "value": "float64x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" } }, "Architectures": [ @@ -63451,262 +265962,283 @@ ], "instructions": [ [ - "FMAXP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_s16", + "name": "vnegq_s8", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int8x16_t a" ], "return_type": { - "value": "int16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], 
"instructions": [ [ - "SMAXP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_s32", + "name": "vorn_s16", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8B" }, "b": { - "register": "Vm.4S" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMAXP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_s8", + "name": "vorn_s32", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "int8x16_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8B" }, "b": { - "register": "Vm.16B" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMAXP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_u16", + "name": "vorn_s64", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm.8H" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMAXP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_u32", + "name": "vorn_s8", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8B" }, "b": { - "register": "Vm.4S" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMAXP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_u8", + "name": "vorn_u16", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { 
- "value": "uint8x16_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8B" }, "b": { - "register": "Vm.16B" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMAXP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxqd_f64", + "name": "vorn_u32", "arguments": [ - "float64x2_t a" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "float64_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAXP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxs_f32", + "name": "vorn_u64", "arguments": [ - "float32x2_t a" + "uint64x1_t a", + "uint64x1_t b" ], "return_type": { - "value": "float32_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAXP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmin_f16", + "name": "vorn_u8", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "float16x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.8B" }, "b": { - "register": "Vm.4H" + "register": "Vm.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FMINP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmin_f32", + "name": "vornq_s16", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.16B" }, "b": { - "register": "Vm.2S" + "register": "Vm.16B" } }, "Architectures": [ @@ -63716,26 +266248,26 @@ ], "instructions": [ [ - "FMINP" 
+ "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmin_s16", + "name": "vornq_s32", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.16B" }, "b": { - "register": "Vm.4H" + "register": "Vm.16B" } }, "Architectures": [ @@ -63745,26 +266277,26 @@ ], "instructions": [ [ - "SMINP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmin_s32", + "name": "vornq_s64", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.16B" }, "b": { - "register": "Vm.2S" + "register": "Vm.16B" } }, "Architectures": [ @@ -63774,26 +266306,26 @@ ], "instructions": [ [ - "SMINP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmin_s8", + "name": "vornq_s8", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int8x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.16B" }, "b": { - "register": "Vm.8B" + "register": "Vm.16B" } }, "Architectures": [ @@ -63803,26 +266335,26 @@ ], "instructions": [ [ - "SMINP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmin_u16", + "name": "vornq_u16", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.16B" }, "b": { - "register": "Vm.4H" + "register": "Vm.16B" } }, "Architectures": [ @@ -63832,26 +266364,26 @@ ], "instructions": [ [ - "UMINP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmin_u32", + "name": "vornq_u32", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - 
"value": "uint32x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.16B" }, "b": { - "register": "Vm.2S" + "register": "Vm.16B" } }, "Architectures": [ @@ -63861,26 +266393,26 @@ ], "instructions": [ [ - "UMINP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmin_u8", + "name": "vornq_u64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.16B" }, "b": { - "register": "Vm.8B" + "register": "Vm.16B" } }, "Architectures": [ @@ -63890,329 +266422,361 @@ ], "instructions": [ [ - "UMINP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminnm_f16", + "name": "vornq_u8", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "float16x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.16B" }, "b": { - "register": "Vm.4H" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINNMP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminnm_f32", + "name": "vorr_s16", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.8B" }, "b": { - "register": "Vm.2S" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINNMP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminnmq_f16", + "name": "vorr_s32", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "float16x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm.8H" + "register": 
"Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINNMP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminnmq_f32", + "name": "vorr_s64", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "float32x4_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8B" }, "b": { - "register": "Vm.4S" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINNMP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminnmq_f64", + "name": "vorr_s8", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "float64x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.8B" }, "b": { - "register": "Vm.2D" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINNMP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminnmqd_f64", + "name": "vorr_u16", "arguments": [ - "float64x2_t a" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "float64_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINNMP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminnms_f32", + "name": "vorr_u32", "arguments": [ - "float32x2_t a" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "float32_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINNMP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_f16", + "name": "vorr_u64", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "uint64x1_t a", + 
"uint64x1_t b" ], "return_type": { - "value": "float16x8_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm.8H" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_f32", + "name": "vorr_u8", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "float32x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8B" }, "b": { - "register": "Vm.4S" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_f64", + "name": "vorrq_s16", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "float64x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.16B" }, "b": { - "register": "Vm.2D" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_s16", + "name": "vorrq_s32", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, "b": { - "register": "Vm.8H" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_s32", + "name": "vorrq_s64", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.16B" }, "b": { - "register": "Vm.4S" + 
"register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_s8", + "name": "vorrq_s8", "arguments": [ "int8x16_t a", "int8x16_t b" @@ -64229,17 +266793,19 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_u16", + "name": "vorrq_u16", "arguments": [ "uint16x8_t a", "uint16x8_t b" @@ -64249,24 +266815,26 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, "b": { - "register": "Vm.8H" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_u32", + "name": "vorrq_u32", "arguments": [ "uint32x4_t a", "uint32x4_t b" @@ -64276,30 +266844,32 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.16B" }, "b": { - "register": "Vm.4S" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_u8", + "name": "vorrq_u64", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { @@ -64310,72 +266880,90 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminqd_f64", + "name": "vorrq_u8", "arguments": [ - "float64x2_t a" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "float64_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmins_f32", + "name": "vpadal_s16", "arguments": [ - "float32x2_t a" + 
"int32x2_t a", + "int16x4_t b" ], "return_type": { - "value": "float32_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.2S" + }, + "b": { + "register": "Vn.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINP" + "SADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabs_s16", + "name": "vpadal_s32", "arguments": [ - "int16x4_t a" + "int64x1_t a", + "int32x2_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.1D" + }, + "b": { + "register": "Vn.2S" } }, "Architectures": [ @@ -64385,22 +266973,26 @@ ], "instructions": [ [ - "SQABS" + "SADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabs_s32", + "name": "vpadal_s8", "arguments": [ - "int32x2_t a" + "int16x4_t a", + "int8x8_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.4H" + }, + "b": { + "register": "Vn.8B" } }, "Architectures": [ @@ -64410,45 +267002,55 @@ ], "instructions": [ [ - "SQABS" + "SADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabs_s64", + "name": "vpadal_u16", "arguments": [ - "int64x1_t a" + "uint32x2_t a", + "uint16x4_t b" ], "return_type": { - "value": "int64x1_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.2S" + }, + "b": { + "register": "Vn.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQABS" + "UADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabs_s8", + "name": "vpadal_u32", "arguments": [ - "int8x8_t a" + "uint64x1_t a", + "uint32x2_t b" ], "return_type": { - "value": "int8x8_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vd.1D" + }, + "b": { + "register": "Vn.2S" } }, "Architectures": [ @@ -64458,91 +267060,113 @@ ], "instructions": [ [ - "SQABS" + "UADALP" ] ] }, { "SIMD_ISA": 
"Neon", - "name": "vqabsb_s8", + "name": "vpadal_u8", "arguments": [ - "int8_t a" + "uint16x4_t a", + "uint8x8_t b" ], "return_type": { - "value": "int8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" + "register": "Vd.4H" + }, + "b": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQABS" + "UADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabsd_s64", + "name": "vpadalq_s16", "arguments": [ - "int64_t a" + "int32x4_t a", + "int16x8_t b" ], "return_type": { - "value": "int64_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQABS" + "SADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabsh_s16", + "name": "vpadalq_s32", "arguments": [ - "int16_t a" + "int64x2_t a", + "int32x4_t b" ], "return_type": { - "value": "int16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQABS" + "SADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabsq_s16", + "name": "vpadalq_s8", "arguments": [ - "int16x8_t a" + "int16x8_t a", + "int8x16_t b" ], "return_type": { "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.8H" + }, + "b": { + "register": "Vn.16B" } }, "Architectures": [ @@ -64552,22 +267176,26 @@ ], "instructions": [ [ - "SQABS" + "SADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabsq_s32", + "name": "vpadalq_u16", "arguments": [ - "int32x4_t a" + "uint32x4_t a", + "uint16x8_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" } }, "Architectures": [ @@ -64577,44 +267205,54 @@ ], "instructions": [ [ 
- "SQABS" + "UADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabsq_s64", + "name": "vpadalq_u32", "arguments": [ - "int64x2_t a" + "uint64x2_t a", + "uint32x4_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQABS" + "UADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabsq_s8", + "name": "vpadalq_u8", "arguments": [ - "int8x16_t a" + "uint16x8_t a", + "uint8x16_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.8H" + }, + "b": { "register": "Vn.16B" } }, @@ -64625,49 +267263,54 @@ ], "instructions": [ [ - "SQABS" + "UADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabss_s32", + "name": "vpadd_f16", "arguments": [ - "int32_t a" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "int32_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "SQABS" + "FADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadd_s16", + "name": "vpadd_f32", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.2S" }, "b": { - "register": "Vm.4H" + "register": "Vm.2S" } }, "Architectures": [ @@ -64677,26 +267320,26 @@ ], "instructions": [ [ - "SQADD" + "FADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadd_s32", + "name": "vpadd_s16", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4H" }, "b": { - 
"register": "Vm.2S" + "register": "Vm.4H" } }, "Architectures": [ @@ -64706,26 +267349,26 @@ ], "instructions": [ [ - "SQADD" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadd_s64", + "name": "vpadd_s32", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "int64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.2S" }, "b": { - "register": "Dm" + "register": "Vm.2S" } }, "Architectures": [ @@ -64735,13 +267378,13 @@ ], "instructions": [ [ - "SQADD" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadd_s8", + "name": "vpadd_s8", "arguments": [ "int8x8_t a", "int8x8_t b" @@ -64764,13 +267407,13 @@ ], "instructions": [ [ - "SQADD" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadd_u16", + "name": "vpadd_u16", "arguments": [ "uint16x4_t a", "uint16x4_t b" @@ -64793,13 +267436,13 @@ ], "instructions": [ [ - "UQADD" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadd_u32", + "name": "vpadd_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b" @@ -64822,26 +267465,26 @@ ], "instructions": [ [ - "UQADD" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadd_u64", + "name": "vpadd_u8", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "uint64x1_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.8B" }, "b": { - "register": "Dm" + "register": "Vm.8B" } }, "Architectures": [ @@ -64851,55 +267494,45 @@ ], "instructions": [ [ - "UQADD" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadd_u8", + "name": "vpaddd_f64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "float64x2_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQADD" + 
"FADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddb_s8", + "name": "vpaddd_s64", "arguments": [ - "int8_t a", - "int8_t b" + "int64x2_t a" ], "return_type": { - "value": "int8_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" - }, - "b": { - "register": "Bm" + "register": "Vn.2D" } }, "Architectures": [ @@ -64907,26 +267540,22 @@ ], "instructions": [ [ - "SQADD" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddb_u8", + "name": "vpaddd_u64", "arguments": [ - "uint8_t a", - "uint8_t b" + "uint64x2_t a" ], "return_type": { - "value": "uint8_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" - }, - "b": { - "register": "Bm" + "register": "Vn.2D" } }, "Architectures": [ @@ -64934,134 +267563,122 @@ ], "instructions": [ [ - "UQADD" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddd_s64", + "name": "vpaddl_s16", "arguments": [ - "int64_t a", - "int64_t b" + "int16x4_t a" ], "return_type": { - "value": "int64_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "b": { - "register": "Dm" + "register": "Vn.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQADD" + "SADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddd_u64", + "name": "vpaddl_s32", "arguments": [ - "uint64_t a", - "uint64_t b" + "int32x2_t a" ], "return_type": { - "value": "uint64_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "b": { - "register": "Dm" + "register": "Vn.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQADD" + "SADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddh_s16", + "name": "vpaddl_s8", "arguments": [ - "int16_t a", - "int16_t b" + "int8x8_t a" ], "return_type": { - "value": "int16_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "b": { - "register": "Hm" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ 
[ - "SQADD" + "SADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddh_u16", + "name": "vpaddl_u16", "arguments": [ - "uint16_t a", - "uint16_t b" + "uint16x4_t a" ], "return_type": { - "value": "uint16_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "b": { - "register": "Hm" + "register": "Vn.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQADD" + "UADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddq_s16", + "name": "vpaddl_u32", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "uint32x2_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vn.2S" } }, "Architectures": [ @@ -65071,26 +267688,22 @@ ], "instructions": [ [ - "SQADD" + "UADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddq_s32", + "name": "vpaddl_u8", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "uint8x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn.8B" } }, "Architectures": [ @@ -65100,26 +267713,22 @@ ], "instructions": [ [ - "SQADD" + "UADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddq_s64", + "name": "vpaddlq_s16", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "int16x8_t a" ], "return_type": { - "value": "int64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn.8H" } }, "Architectures": [ @@ -65129,26 +267738,22 @@ ], "instructions": [ [ - "SQADD" + "SADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddq_s8", + "name": "vpaddlq_s32", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "int32x4_t a" ], "return_type": { - "value": "int8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + 
"register": "Vn.4S" } }, "Architectures": [ @@ -65158,26 +267763,22 @@ ], "instructions": [ [ - "SQADD" + "SADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddq_u16", + "name": "vpaddlq_s8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "int8x16_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vn.16B" } }, "Architectures": [ @@ -65187,26 +267788,22 @@ ], "instructions": [ [ - "UQADD" + "SADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddq_u32", + "name": "vpaddlq_u16", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "uint16x8_t a" ], "return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn.8H" } }, "Architectures": [ @@ -65216,26 +267813,22 @@ ], "instructions": [ [ - "UQADD" + "UADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddq_u64", + "name": "vpaddlq_u32", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "uint32x4_t a" ], "return_type": { "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn.4S" } }, "Architectures": [ @@ -65245,26 +267838,22 @@ ], "instructions": [ [ - "UQADD" + "UADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddq_u8", + "name": "vpaddlq_u8", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "uint8x16_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" } }, "Architectures": [ @@ -65274,26 +267863,26 @@ ], "instructions": [ [ - "UQADD" + "UADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadds_s32", + "name": "vpaddq_f16", "arguments": [ - "int32_t a", - "int32_t b" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "int32_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - 
"register": "Sn" + "register": "Vn.8H" }, "b": { - "register": "Sm" + "register": "Vm.8H" } }, "Architectures": [ @@ -65301,26 +267890,26 @@ ], "instructions": [ [ - "SQADD" + "FADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadds_u32", + "name": "vpaddq_f32", "arguments": [ - "uint32_t a", - "uint32_t b" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "uint32_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vn.4S" }, "b": { - "register": "Sm" + "register": "Vm.4S" } }, "Architectures": [ @@ -65328,35 +267917,26 @@ ], "instructions": [ [ - "UQADD" + "FADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_high_lane_s16", + "name": "vpaddq_f64", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x4_t v", - "const int lane" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.2D" }, "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Vm.2D" } }, "Architectures": [ @@ -65364,35 +267944,26 @@ ], "instructions": [ [ - "SQDMLAL2" + "FADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_high_lane_s32", + "name": "vpaddq_s16", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x2_t v", - "const int lane" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8H" }, "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vm.8H" } }, "Architectures": [ @@ -65400,35 +267971,26 @@ ], "instructions": [ [ - "SQDMLAL2" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_high_laneq_s16", + "name": "vpaddq_s32", "arguments": [ "int32x4_t a", - "int16x8_t b", - "int16x8_t v", - "const int lane" + "int32x4_t 
b" ], "return_type": { "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.4S" }, "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vm.4S" } }, "Architectures": [ @@ -65436,35 +267998,26 @@ ], "instructions": [ [ - "SQDMLAL2" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_high_laneq_s32", + "name": "vpaddq_s64", "arguments": [ "int64x2_t a", - "int32x4_t b", - "int32x4_t v", - "const int lane" + "int64x2_t b" ], "return_type": { "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.2D" }, "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vm.2D" } }, "Architectures": [ @@ -65472,30 +268025,26 @@ ], "instructions": [ [ - "SQDMLAL2" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_high_n_s16", + "name": "vpaddq_s8", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16_t c" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.16B" }, "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vm.16B" } }, "Architectures": [ @@ -65503,30 +268052,26 @@ ], "instructions": [ [ - "SQDMLAL2" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_high_n_s32", + "name": "vpaddq_u16", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32_t c" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8H" }, "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vm.8H" } }, "Architectures": [ @@ -65534,30 +268079,26 @@ ], "instructions": [ [ - "SQDMLAL2" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_high_s16", + 
"name": "vpaddq_u32", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x8_t c" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.4S" }, "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.8H" + "register": "Vm.4S" } }, "Architectures": [ @@ -65565,30 +268106,26 @@ ], "instructions": [ [ - "SQDMLAL2" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_high_s32", + "name": "vpaddq_u64", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x4_t c" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.2D" }, "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.4S" + "register": "Vm.2D" } }, "Architectures": [ @@ -65596,178 +268133,133 @@ ], "instructions": [ [ - "SQDMLAL2" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_lane_s16", + "name": "vpaddq_u8", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x4_t v", - "const int lane" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.16B" }, "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMLAL" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_lane_s32", + "name": "vpadds_f32", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x2_t v", - "const int lane" + "float32x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" } }, 
"Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMLAL" + "FADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_laneq_s16", + "name": "vpmax_f16", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x8_t v", - "const int lane" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4H" }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "SQDMLAL" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_laneq_s32", + "name": "vpmax_f32", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x4_t v", - "const int lane" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int64x2_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": "Vn.2S" }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMLAL" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_n_s16", + "name": "vpmax_s16", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16_t c" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4H" }, - "c": { - "register": "Vm.H[0]" + "b": { + "register": "Vm.4H" } }, "Architectures": [ @@ -65777,30 +268269,26 @@ ], "instructions": [ [ - "SQDMLAL" + "SMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_n_s32", + "name": "vpmax_s32", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32_t c" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - 
"register": "Vd.2D" - }, - "b": { "register": "Vn.2S" }, - "c": { - "register": "Vm.S[0]" + "b": { + "register": "Vm.2S" } }, "Architectures": [ @@ -65810,30 +268298,26 @@ ], "instructions": [ [ - "SQDMLAL" + "SMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_s16", + "name": "vpmax_s8", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x4_t c" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.8B" }, "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.4H" + "register": "Vm.8B" } }, "Architectures": [ @@ -65843,30 +268327,26 @@ ], "instructions": [ [ - "SQDMLAL" + "SMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_s32", + "name": "vpmax_u16", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x2_t c" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.4H" }, "b": { - "register": "Vn.2S" - }, - "c": { - "register": "Vm.2S" + "register": "Vm.4H" } }, "Architectures": [ @@ -65876,102 +268356,84 @@ ], "instructions": [ [ - "SQDMLAL" + "UMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlalh_lane_s16", + "name": "vpmax_u32", "arguments": [ - "int32_t a", - "int16_t b", - "int16x4_t v", - "const int lane" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "int32_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.2S" }, "b": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMLAL" + "UMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlalh_laneq_s16", + "name": "vpmax_u8", "arguments": [ - "int32_t a", - "int16_t b", - "int16x8_t v", - "const int lane" + "uint8x8_t a", + "uint8x8_t b" 
], "return_type": { - "value": "int32_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.8B" }, "b": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMLAL" + "UMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlalh_s16", + "name": "vpmaxnm_f16", "arguments": [ - "int32_t a", - "int16_t b", - "int16_t c" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "int32_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.4H" }, "b": { - "register": "Hn" - }, - "c": { - "register": "Hm" + "register": "Vm.4H" } }, "Architectures": [ @@ -65979,34 +268441,25 @@ ], "instructions": [ [ - "SQDMLAL" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlals_lane_s32", + "name": "vpmaxnm_f32", "arguments": [ - "int64_t a", - "int32_t b", - "int32x2_t v", - "const int lane" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int64_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.2S" }, "b": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { "register": "Vm.2S" } }, @@ -66015,35 +268468,26 @@ ], "instructions": [ [ - "SQDMLAL" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlals_laneq_s32", + "name": "vpmaxnmq_f16", "arguments": [ - "int64_t a", - "int32_t b", - "int32x4_t v", - "const int lane" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "int64_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.8H" }, "b": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ @@ -66051,30 +268495,26 @@ ], "instructions": [ [ - "SQDMLAL" + "FMAXNMP" ] ] }, { 
"SIMD_ISA": "Neon", - "name": "vqdmlals_s32", + "name": "vpmaxnmq_f32", "arguments": [ - "int64_t a", - "int32_t b", - "int32_t c" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int64_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.4S" }, "b": { - "register": "Sn" - }, - "c": { - "register": "Sm" + "register": "Vm.4S" } }, "Architectures": [ @@ -66082,35 +268522,26 @@ ], "instructions": [ [ - "SQDMLAL" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_high_lane_s16", + "name": "vpmaxnmq_f64", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x4_t v", - "const int lane" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.2D" }, "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Vm.2D" } }, "Architectures": [ @@ -66118,35 +268549,22 @@ ], "instructions": [ [ - "SQDMLSL2" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_high_lane_s32", + "name": "vpmaxnmqd_f64", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x2_t v", - "const int lane" + "float64x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vn.2D" } }, "Architectures": [ @@ -66154,35 +268572,22 @@ ], "instructions": [ [ - "SQDMLSL2" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_high_laneq_s16", + "name": "vpmaxnms_f32", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x8_t v", - "const int lane" + "float32x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": 
"Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vn.2S" } }, "Architectures": [ @@ -66190,35 +268595,26 @@ ], "instructions": [ [ - "SQDMLSL2" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_high_laneq_s32", + "name": "vpmaxq_f16", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x4_t v", - "const int lane" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "int64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8H" }, "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ @@ -66226,30 +268622,26 @@ ], "instructions": [ [ - "SQDMLSL2" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_high_n_s16", + "name": "vpmaxq_f32", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16_t c" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.4S" }, "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vm.4S" } }, "Architectures": [ @@ -66257,30 +268649,26 @@ ], "instructions": [ [ - "SQDMLSL2" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_high_n_s32", + "name": "vpmaxq_f64", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32_t c" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int64x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.2D" }, "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vm.2D" } }, "Architectures": [ @@ -66288,29 +268676,25 @@ ], "instructions": [ [ - "SQDMLSL2" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_high_s16", + "name": "vpmaxq_s16", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x8_t 
c" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.8H" }, - "c": { + "b": { "register": "Vm.8H" } }, @@ -66319,29 +268703,25 @@ ], "instructions": [ [ - "SQDMLSL2" + "SMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_high_s32", + "name": "vpmaxq_s32", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x4_t c" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": "Vn.4S" }, - "c": { + "b": { "register": "Vm.4S" } }, @@ -66350,111 +268730,80 @@ ], "instructions": [ [ - "SQDMLSL2" + "SMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_lane_s16", + "name": "vpmaxq_s8", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x4_t v", - "const int lane" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.16B" }, "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMLSL" + "SMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_lane_s32", + "name": "vpmaxq_u16", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x2_t v", - "const int lane" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8H" }, "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMLSL" + "UMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_laneq_s16", + 
"name": "vpmaxq_u32", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x8_t v", - "const int lane" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.4S" }, "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vm.4S" } }, "Architectures": [ @@ -66462,35 +268811,26 @@ ], "instructions": [ [ - "SQDMLSL" + "UMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_laneq_s32", + "name": "vpmaxq_u8", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x4_t v", - "const int lane" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.16B" }, "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vm.16B" } }, "Architectures": [ @@ -66498,128 +268838,99 @@ ], "instructions": [ [ - "SQDMLSL" + "UMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_n_s16", + "name": "vpmaxqd_f64", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16_t c" + "float64x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMLSL" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_n_s32", + "name": "vpmaxs_f32", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32_t c" + "float32x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": "Vn.2S" - }, - "c": { - "register": "Vm.S[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], 
"instructions": [ [ - "SQDMLSL" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_s16", + "name": "vpmin_f16", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x4_t c" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4H" }, - "c": { + "b": { "register": "Vm.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SQDMLSL" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_s32", + "name": "vpmin_f32", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x2_t c" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int64x2_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": "Vn.2S" }, - "c": { + "b": { "register": "Vm.2S" } }, @@ -66630,304 +268941,253 @@ ], "instructions": [ [ - "SQDMLSL" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlslh_lane_s16", + "name": "vpmin_s16", "arguments": [ - "int32_t a", - "int16_t b", - "int16x4_t v", - "const int lane" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int32_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.4H" }, "b": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMLSL" + "SMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlslh_laneq_s16", + "name": "vpmin_s32", "arguments": [ - "int32_t a", - "int16_t b", - "int16x8_t v", - "const int lane" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "int32_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.2S" }, "b": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vm.2S" } }, 
"Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMLSL" + "SMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlslh_s16", + "name": "vpmin_s8", "arguments": [ - "int32_t a", - "int16_t b", - "int16_t c" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int32_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.8B" }, "b": { - "register": "Hn" - }, - "c": { - "register": "Hm" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMLSL" + "SMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsls_lane_s32", + "name": "vpmin_u16", "arguments": [ - "int64_t a", - "int32_t b", - "int32x2_t v", - "const int lane" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "int64_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.4H" }, "b": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMLSL" + "UMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsls_laneq_s32", + "name": "vpmin_u32", "arguments": [ - "int64_t a", - "int32_t b", - "int32x4_t v", - "const int lane" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "int64_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.2S" }, "b": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMLSL" + "UMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsls_s32", + "name": "vpmin_u8", "arguments": [ - "int64_t a", - "int32_t b", - "int32_t c" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "int64_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" 
+ "register": "Vn.8B" }, "b": { - "register": "Sn" - }, - "c": { - "register": "Sm" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMLSL" + "UMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulh_lane_s16", + "name": "vpminnm_f16", "arguments": [ - "int16x4_t a", - "int16x4_t v", - "const int lane" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "int16x4_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4H" }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { + "b": { "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulh_lane_s32", + "name": "vpminnm_f32", "arguments": [ - "int32x2_t a", - "int32x2_t v", - "const int lane" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int32x2_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2S" }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { + "b": { "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulh_laneq_s16", + "name": "vpminnmq_f16", "arguments": [ - "int16x4_t a", - "int16x8_t v", - "const int lane" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "int16x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Vn.8H" }, - "v": { + "b": { "register": "Vm.8H" } }, @@ -66936,30 +269196,25 @@ ], "instructions": [ [ - "SQDMULH" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulh_laneq_s32", + "name": "vpminnmq_f32", "arguments": [ - "int32x2_t a", - "int32x4_t v", - "const int lane" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int32x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - 
"register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.4S" }, - "v": { + "b": { "register": "Vm.4S" } }, @@ -66968,147 +269223,126 @@ ], "instructions": [ [ - "SQDMULH" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulh_n_s16", + "name": "vpminnmq_f64", "arguments": [ - "int16x4_t a", - "int16_t b" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int16x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.2D" }, "b": { - "register": "Vm.H[0]" + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulh_n_s32", + "name": "vpminnmqd_f64", "arguments": [ - "int32x2_t a", - "int32_t b" + "float64x2_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.S[0]" + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulh_s16", + "name": "vpminnms_f32", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "float32x2_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "register": "Vn.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulh_s32", + "name": "vpminq_f16", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "int32x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.8H" }, "b": { - "register": "Vm.2S" + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "FMINP" ] ] }, { 
"SIMD_ISA": "Neon", - "name": "vqdmulhh_lane_s16", + "name": "vpminq_f32", "arguments": [ - "int16_t a", - "int16x4_t v", - "const int lane" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.4S" }, - "v": { - "register": "Vm.4H" + "b": { + "register": "Vm.4S" } }, "Architectures": [ @@ -67116,31 +269350,26 @@ ], "instructions": [ [ - "SQDMULH" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhh_laneq_s16", + "name": "vpminq_f64", "arguments": [ - "int16_t a", - "int16x8_t v", - "const int lane" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int16_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Vn.2D" }, - "v": { - "register": "Vm.8H" + "b": { + "register": "Vm.2D" } }, "Architectures": [ @@ -67148,26 +269377,26 @@ ], "instructions": [ [ - "SQDMULH" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhh_s16", + "name": "vpminq_s16", "arguments": [ - "int16_t a", - "int16_t b" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.8H" }, "b": { - "register": "Hm" + "register": "Vm.8H" } }, "Architectures": [ @@ -67175,98 +269404,79 @@ ], "instructions": [ [ - "SQDMULH" + "SMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhq_lane_s16", + "name": "vpminq_s32", "arguments": [ - "int16x8_t a", - "int16x4_t v", - "const int lane" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.4S" }, - "v": { - "register": "Vm.4H" + "b": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - 
"A32", "A64" ], "instructions": [ [ - "SQDMULH" + "SMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhq_lane_s32", + "name": "vpminq_s8", "arguments": [ - "int32x4_t a", - "int32x2_t v", - "const int lane" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Vn.16B" }, - "v": { - "register": "Vm.2S" + "b": { + "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "SMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhq_laneq_s16", + "name": "vpminq_u16", "arguments": [ - "int16x8_t a", - "int16x8_t v", - "const int lane" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { + "b": { "register": "Vm.8H" } }, @@ -67275,30 +269485,25 @@ ], "instructions": [ [ - "SQDMULH" + "UMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhq_laneq_s32", + "name": "vpminq_u32", "arguments": [ - "int32x4_t a", - "int32x4_t v", - "const int lane" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { + "b": { "register": "Vm.4S" } }, @@ -67307,113 +269512,95 @@ ], "instructions": [ [ - "SQDMULH" + "UMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhq_n_s16", + "name": "vpminq_u8", "arguments": [ - "int16x8_t a", - "int16_t b" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, "b": { - "register": "Vm.H[0]" + "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - 
"SQDMULH" + "UMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhq_n_s32", + "name": "vpminqd_f64", "arguments": [ - "int32x4_t a", - "int32_t b" + "float64x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.S[0]" + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhq_s16", + "name": "vpmins_f32", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "float32x2_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vn.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhq_s32", + "name": "vqabs_s16", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int16x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn.4H" } }, "Architectures": [ @@ -67423,63 +269610,47 @@ ], "instructions": [ [ - "SQDMULH" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhs_lane_s32", + "name": "vqabs_s32", "arguments": [ - "int32_t a", - "int32x2_t v", - "const int lane" + "int32x2_t a" ], "return_type": { - "value": "int32_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vn.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhs_laneq_s32", + "name": "vqabs_s64", "arguments": [ - "int32_t a", - "int32x4_t v", - "const int lane" + "int64x1_t a" ], "return_type": { - "value": "int32_t" + "value": "int64x1_t" }, 
"Arguments_Preparation": { "a": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Dn" } }, "Architectures": [ @@ -67487,58 +269658,47 @@ ], "instructions": [ [ - "SQDMULH" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhs_s32", + "name": "vqabs_s8", "arguments": [ - "int32_t a", - "int32_t b" + "int8x8_t a" ], "return_type": { - "value": "int32_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "b": { - "register": "Sm" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_high_lane_s16", + "name": "vqabsb_s8", "arguments": [ - "int16x8_t a", - "int16x4_t v", - "const int lane" + "int8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Bn" } }, "Architectures": [ @@ -67546,31 +269706,22 @@ ], "instructions": [ [ - "SQDMULL2" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_high_lane_s32", + "name": "vqabsd_s64", "arguments": [ - "int32x4_t a", - "int32x2_t v", - "const int lane" + "int64_t a" ], "return_type": { - "value": "int64x2_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Dn" } }, "Architectures": [ @@ -67578,31 +269729,22 @@ ], "instructions": [ [ - "SQDMULL2" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_high_laneq_s16", + "name": "vqabsh_s16", "arguments": [ - "int16x8_t a", - "int16x8_t v", - "const int lane" + "int16_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - 
"register": "Vm.8H" + "register": "Hn" } }, "Architectures": [ @@ -67610,85 +269752,72 @@ ], "instructions": [ [ - "SQDMULL2" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_high_laneq_s32", + "name": "vqabsq_s16", "arguments": [ - "int32x4_t a", - "int32x4_t v", - "const int lane" + "int16x8_t a" ], "return_type": { - "value": "int64x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vn.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULL2" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_high_n_s16", + "name": "vqabsq_s32", "arguments": [ - "int16x8_t a", - "int16_t b" + "int32x4_t a" ], "return_type": { "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.H[0]" + "register": "Vn.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULL2" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_high_n_s32", + "name": "vqabsq_s64", "arguments": [ - "int32x4_t a", - "int32_t b" + "int64x2_t a" ], "return_type": { "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.S[0]" + "register": "Vn.2D" } }, "Architectures": [ @@ -67696,53 +269825,47 @@ ], "instructions": [ [ - "SQDMULL2" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_high_s16", + "name": "vqabsq_s8", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int8x16_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULL2" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_high_s32", + "name": "vqabss_s32", "arguments": [ - "int32x4_t a", - "int32x4_t b" + 
"int32_t a" ], "return_type": { - "value": "int64x2_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Sn" } }, "Architectures": [ @@ -67750,30 +269873,25 @@ ], "instructions": [ [ - "SQDMULL2" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_lane_s16", + "name": "vqadd_s16", "arguments": [ "int16x4_t a", - "int16x4_t v", - "const int lane" + "int16x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4H" }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { + "b": { "register": "Vm.4H" } }, @@ -67784,30 +269902,25 @@ ], "instructions": [ [ - "SQDMULL" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_lane_s32", + "name": "vqadd_s32", "arguments": [ "int32x2_t a", - "int32x2_t v", - "const int lane" + "int32x2_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2S" }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { + "b": { "register": "Vm.2S" } }, @@ -67818,90 +269931,84 @@ ], "instructions": [ [ - "SQDMULL" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_laneq_s16", + "name": "vqadd_s64", "arguments": [ - "int16x4_t a", - "int16x8_t v", - "const int lane" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Dn" }, - "v": { - "register": "Vm.8H" + "b": { + "register": "Dm" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULL" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_laneq_s32", + "name": "vqadd_s8", "arguments": [ - "int32x2_t a", - "int32x4_t v", - "const int lane" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { 
"a": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.8B" }, - "v": { - "register": "Vm.4S" + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULL" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_n_s16", + "name": "vqadd_u16", "arguments": [ - "int16x4_t a", - "int16_t b" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4H" }, "b": { - "register": "Vm.H[0]" + "register": "Vm.4H" } }, "Architectures": [ @@ -67911,26 +270018,26 @@ ], "instructions": [ [ - "SQDMULL" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_n_s32", + "name": "vqadd_u32", "arguments": [ - "int32x2_t a", - "int32_t b" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2S" }, "b": { - "register": "Vm.S[0]" + "register": "Vm.2S" } }, "Architectures": [ @@ -67940,26 +270047,26 @@ ], "instructions": [ [ - "SQDMULL" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_s16", + "name": "vqadd_u64", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "uint64x1_t a", + "uint64x1_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Dn" }, "b": { - "register": "Vm.4H" + "register": "Dm" } }, "Architectures": [ @@ -67969,26 +270076,26 @@ ], "instructions": [ [ - "SQDMULL" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_s32", + "name": "vqadd_u8", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.8B" }, "b": { - "register": "Vm.2S" + "register": "Vm.8B" } }, "Architectures": [ @@ -67998,31 +270105,26 @@ ], 
"instructions": [ [ - "SQDMULL" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmullh_lane_s16", + "name": "vqaddb_s8", "arguments": [ - "int16_t a", - "int16x4_t v", - "const int lane" + "int8_t a", + "int8_t b" ], "return_type": { - "value": "int32_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Bn" }, - "v": { - "register": "Vm.4H" + "b": { + "register": "Bm" } }, "Architectures": [ @@ -68030,31 +270132,26 @@ ], "instructions": [ [ - "SQDMULL" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmullh_laneq_s16", + "name": "vqaddb_u8", "arguments": [ - "int16_t a", - "int16x8_t v", - "const int lane" + "uint8_t a", + "uint8_t b" ], "return_type": { - "value": "int32_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Bn" }, - "v": { - "register": "Vm.8H" + "b": { + "register": "Bm" } }, "Architectures": [ @@ -68062,26 +270159,26 @@ ], "instructions": [ [ - "SQDMULL" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmullh_s16", + "name": "vqaddd_s64", "arguments": [ - "int16_t a", - "int16_t b" + "int64_t a", + "int64_t b" ], "return_type": { - "value": "int32_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Dn" }, "b": { - "register": "Hm" + "register": "Dm" } }, "Architectures": [ @@ -68089,31 +270186,26 @@ ], "instructions": [ [ - "SQDMULL" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulls_lane_s32", + "name": "vqaddd_u64", "arguments": [ - "int32_t a", - "int32x2_t v", - "const int lane" + "uint64_t a", + "uint64_t b" ], "return_type": { - "value": "int64_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Dn" }, - "v": { - "register": "Vm.2S" + "b": { + "register": "Dm" } }, "Architectures": [ @@ -68121,31 +270213,26 @@ ], 
"instructions": [ [ - "SQDMULL" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulls_laneq_s32", + "name": "vqaddh_s16", "arguments": [ - "int32_t a", - "int32x4_t v", - "const int lane" + "int16_t a", + "int16_t b" ], "return_type": { - "value": "int64_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Hn" }, - "v": { - "register": "Vm.4S" + "b": { + "register": "Hm" } }, "Architectures": [ @@ -68153,26 +270240,26 @@ ], "instructions": [ [ - "SQDMULL" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulls_s32", + "name": "vqaddh_u16", "arguments": [ - "int32_t a", - "int32_t b" + "uint16_t a", + "uint16_t b" ], "return_type": { - "value": "int64_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Hn" }, "b": { - "register": "Sm" + "register": "Hm" } }, "Architectures": [ @@ -68180,184 +270267,200 @@ ], "instructions": [ [ - "SQDMULL" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_high_s16", + "name": "vqaddq_s16", "arguments": [ - "int8x8_t r", - "int16x8_t a" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" }, - "r": { - "register": "Vd.8B" + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQXTN2" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_high_s32", + "name": "vqaddq_s32", "arguments": [ - "int16x4_t r", - "int32x4_t a" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" }, - "r": { - "register": "Vd.4H" + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQXTN2" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_high_s64", + "name": "vqaddq_s64", "arguments": [ - "int32x2_t r", 
- "int64x2_t a" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2D" }, - "r": { - "register": "Vd.2S" + "b": { + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQXTN2" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_high_u16", + "name": "vqaddq_s8", "arguments": [ - "uint8x8_t r", - "uint16x8_t a" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, - "r": { - "register": "Vd.8B" + "b": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQXTN2" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_high_u32", + "name": "vqaddq_u16", "arguments": [ - "uint16x4_t r", - "uint32x4_t a" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8H" }, - "r": { - "register": "Vd.4H" + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQXTN2" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_high_u64", + "name": "vqaddq_u32", "arguments": [ - "uint32x2_t r", - "uint64x2_t a" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4S" }, - "r": { - "register": "Vd.2S" + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQXTN2" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_s16", + "name": "vqaddq_u64", "arguments": [ - "int16x8_t a" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "int8x8_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2D" + }, + "b": { + 
"register": "Vm.2D" } }, "Architectures": [ @@ -68367,22 +270470,26 @@ ], "instructions": [ [ - "SQXTN" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_s32", + "name": "vqaddq_u8", "arguments": [ - "int32x4_t a" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "int16x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" } }, "Architectures": [ @@ -68392,122 +270499,197 @@ ], "instructions": [ [ - "SQXTN" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_s64", + "name": "vqadds_s32", "arguments": [ - "int64x2_t a" + "int32_t a", + "int32_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Sn" + }, + "b": { + "register": "Sm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQXTN" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_u16", + "name": "vqadds_u32", "arguments": [ - "uint16x8_t a" + "uint32_t a", + "uint32_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqdmlal_high_lane_s16", + "arguments": [ + "int32x4_t a", + "int16x8_t b", + "int16x4_t v", + "const int lane" + ], + "return_type": { + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.4S" + }, + "b": { "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQXTN" + "SQDMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_u32", + "name": "vqdmlal_high_lane_s32", "arguments": [ - "uint32x4_t a" + "int64x2_t a", + "int32x4_t b", + "int32x2_t v", + "const int lane" ], 
"return_type": { - "value": "uint16x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.2D" + }, + "b": { "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQXTN" + "SQDMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_u64", + "name": "vqdmlal_high_laneq_s16", "arguments": [ - "uint64x2_t a" + "int32x4_t a", + "int16x8_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQXTN" + "SQDMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovnd_s64", + "name": "vqdmlal_high_laneq_s32", "arguments": [ - "int64_t a" + "int64x2_t a", + "int32x4_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "int32_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ @@ -68515,22 +270697,30 @@ ], "instructions": [ [ - "SQXTN" + "SQDMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovnd_u64", + "name": "vqdmlal_high_n_s16", "arguments": [ - "uint64_t a" + "int32x4_t a", + "int16x8_t b", + "int16_t c" ], "return_type": { - "value": "uint32_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ @@ -68538,22 +270728,30 @@ ], "instructions": [ [ - "UQXTN" + "SQDMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovnh_s16", + "name": "vqdmlal_high_n_s32", 
"arguments": [ - "int16_t a" + "int64x2_t a", + "int32x4_t b", + "int32_t c" ], "return_type": { - "value": "int8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ @@ -68561,22 +270759,30 @@ ], "instructions": [ [ - "SQXTN" + "SQDMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovnh_u16", + "name": "vqdmlal_high_s16", "arguments": [ - "uint16_t a" + "int32x4_t a", + "int16x8_t b", + "int16x8_t c" ], "return_type": { - "value": "uint8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" } }, "Architectures": [ @@ -68584,22 +270790,30 @@ ], "instructions": [ [ - "UQXTN" + "SQDMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovns_s32", + "name": "vqdmlal_high_s32", "arguments": [ - "int32_t a" + "int64x2_t a", + "int32x4_t b", + "int32x4_t c" ], "return_type": { - "value": "int16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" } }, "Architectures": [ @@ -68607,76 +270821,111 @@ ], "instructions": [ [ - "SQXTN" + "SQDMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovns_u32", + "name": "vqdmlal_lane_s16", "arguments": [ - "uint32_t a" + "int32x4_t a", + "int16x4_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQXTN" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovun_high_s16", + "name": "vqdmlal_lane_s32", "arguments": [ - "uint8x8_t r", 
- "int16x8_t a" + "int64x2_t a", + "int32x2_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.2D" }, - "r": { - "register": "Vd.8B" + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQXTUN2" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovun_high_s32", + "name": "vqdmlal_laneq_s16", "arguments": [ - "uint16x4_t r", - "int32x4_t a" + "int32x4_t a", + "int16x4_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4S" }, - "r": { - "register": "Vd.4H" + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -68684,26 +270933,35 @@ ], "instructions": [ [ - "SQXTUN2" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovun_high_s64", + "name": "vqdmlal_laneq_s32", "arguments": [ - "uint32x2_t r", - "int64x2_t a" + "int64x2_t a", + "int32x2_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.2D" }, - "r": { - "register": "Vd.2S" + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ @@ -68711,22 +270969,30 @@ ], "instructions": [ [ - "SQXTUN2" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovun_s16", + "name": "vqdmlal_n_s16", "arguments": [ - "int16x8_t a" + "int32x4_t a", + "int16x4_t b", + "int16_t c" ], "return_type": { - "value": "uint8x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.4S" + }, + 
"b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ @@ -68736,22 +271002,30 @@ ], "instructions": [ [ - "SQXTUN" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovun_s32", + "name": "vqdmlal_n_s32", "arguments": [ - "int32x4_t a" + "int64x2_t a", + "int32x2_t b", + "int32_t c" ], "return_type": { - "value": "uint16x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ @@ -68761,22 +271035,30 @@ ], "instructions": [ [ - "SQXTUN" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovun_s64", + "name": "vqdmlal_s16", "arguments": [ - "int64x2_t a" + "int32x4_t a", + "int16x4_t b", + "int16x4_t c" ], "return_type": { - "value": "uint32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" } }, "Architectures": [ @@ -68786,45 +271068,68 @@ ], "instructions": [ [ - "SQXTUN" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovund_s64", + "name": "vqdmlal_s32", "arguments": [ - "int64_t a" + "int64x2_t a", + "int32x2_t b", + "int32x2_t c" ], "return_type": { - "value": "uint32_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQXTUN" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovunh_s16", + "name": "vqdmlalh_lane_s16", "arguments": [ - "int16_t a" + "int32_t a", + "int16_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint8_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { + "register": "Sd" + }, + "b": { "register": "Hn" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": 
"Vm.4H" } }, "Architectures": [ @@ -68832,22 +271137,35 @@ ], "instructions": [ [ - "SQXTUN" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovuns_s32", + "name": "vqdmlalh_laneq_s16", "arguments": [ - "int32_t a" + "int32_t a", + "int16_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "uint16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Sd" + }, + "b": { + "register": "Hn" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -68855,72 +271173,102 @@ ], "instructions": [ [ - "SQXTUN" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqneg_s16", + "name": "vqdmlalh_s16", "arguments": [ - "int16x4_t a" + "int32_t a", + "int16_t b", + "int16_t c" ], "return_type": { - "value": "int16x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Sd" + }, + "b": { + "register": "Hn" + }, + "c": { + "register": "Hm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQNEG" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqneg_s32", + "name": "vqdmlals_lane_s32", "arguments": [ - "int32x2_t a" + "int64_t a", + "int32_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Dd" + }, + "b": { + "register": "Sn" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQNEG" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqneg_s64", + "name": "vqdmlals_laneq_s32", "arguments": [ - "int64x1_t a" + "int64_t a", + "int32_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "int64x1_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Dd" + }, + "b": { + "register": "Sn" + }, + "lane": { + 
"minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ @@ -68928,47 +271276,66 @@ ], "instructions": [ [ - "SQNEG" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqneg_s8", + "name": "vqdmlals_s32", "arguments": [ - "int8x8_t a" + "int64_t a", + "int32_t b", + "int32_t c" ], "return_type": { - "value": "int8x8_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Dd" + }, + "b": { + "register": "Sn" + }, + "c": { + "register": "Sm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQNEG" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqnegb_s8", + "name": "vqdmlsl_high_lane_s16", "arguments": [ - "int8_t a" + "int32x4_t a", + "int16x8_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "int8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -68976,22 +271343,35 @@ ], "instructions": [ [ - "SQNEG" + "SQDMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqnegd_s64", + "name": "vqdmlsl_high_lane_s32", "arguments": [ - "int64_t a" + "int64x2_t a", + "int32x4_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "int64_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -68999,22 +271379,35 @@ ], "instructions": [ [ - "SQNEG" + "SQDMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqnegh_s16", + "name": "vqdmlsl_high_laneq_s16", "arguments": [ - "int16_t a" + "int32x4_t a", + "int16x8_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "int16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - 
"register": "Hn" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -69022,72 +271415,97 @@ ], "instructions": [ [ - "SQNEG" + "SQDMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqnegq_s16", + "name": "vqdmlsl_high_laneq_s32", "arguments": [ - "int16x8_t a" + "int64x2_t a", + "int32x4_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQNEG" + "SQDMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqnegq_s32", + "name": "vqdmlsl_high_n_s16", "arguments": [ - "int32x4_t a" + "int32x4_t a", + "int16x8_t b", + "int16_t c" ], "return_type": { "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQNEG" + "SQDMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqnegq_s64", + "name": "vqdmlsl_high_n_s32", "arguments": [ - "int64x2_t a" + "int64x2_t a", + "int32x4_t b", + "int32_t c" ], "return_type": { "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ @@ -69095,47 +271513,61 @@ ], "instructions": [ [ - "SQNEG" + "SQDMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqnegq_s8", + "name": "vqdmlsl_high_s16", "arguments": [ - "int8x16_t a" + "int32x4_t a", + "int16x8_t b", + "int16x8_t c" ], "return_type": { - "value": "int8x16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": 
"Vn.16B" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQNEG" + "SQDMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqnegs_s32", + "name": "vqdmlsl_high_s32", "arguments": [ - "int32_t a" + "int64x2_t a", + "int32x4_t b", + "int32x4_t c" ], "return_type": { - "value": "int32_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" } }, "Architectures": [ @@ -69143,25 +271575,25 @@ ], "instructions": [ [ - "SQNEG" + "SQDMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlah_lane_s16", + "name": "vqdmlsl_lane_s16", "arguments": [ - "int16x4_t a", + "int32x4_t a", "int16x4_t b", "int16x4_t v", "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.4S" }, "b": { "register": "Vn.4H" @@ -69175,29 +271607,31 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLAH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlah_lane_s32", + "name": "vqdmlsl_lane_s32", "arguments": [ - "int32x2_t a", + "int64x2_t a", "int32x2_t b", "int32x2_t v", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.2D" }, "b": { "register": "Vn.2S" @@ -69211,29 +271645,31 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLAH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlah_laneq_s16", + "name": "vqdmlsl_laneq_s16", "arguments": [ - "int16x4_t a", + "int32x4_t a", "int16x4_t b", "int16x8_t v", "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.4S" }, "b": { "register": 
"Vn.4H" @@ -69251,25 +271687,25 @@ ], "instructions": [ [ - "SQRDMLAH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlah_laneq_s32", + "name": "vqdmlsl_laneq_s32", "arguments": [ - "int32x2_t a", + "int64x2_t a", "int32x2_t b", "int32x4_t v", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.2D" }, "b": { "register": "Vn.2S" @@ -69287,24 +271723,90 @@ ], "instructions": [ [ - "SQRDMLAH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlah_s16", + "name": "vqdmlsl_n_s16", "arguments": [ - "int16x4_t a", + "int32x4_t a", + "int16x4_t b", + "int16_t c" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.H[0]" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQDMLSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqdmlsl_n_s32", + "arguments": [ + "int64x2_t a", + "int32x2_t b", + "int32_t c" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.S[0]" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQDMLSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqdmlsl_s16", + "arguments": [ + "int32x4_t a", "int16x4_t b", "int16x4_t c" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.4S" }, "b": { "register": "Vn.4H" @@ -69314,28 +271816,30 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLAH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlah_s32", + "name": "vqdmlsl_s32", "arguments": [ - "int32x2_t a", + "int64x2_t a", "int32x2_t b", "int32x2_t c" ], "return_type": { - "value": 
"int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.2D" }, "b": { "register": "Vn.2S" @@ -69345,29 +271849,31 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLAH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahh_lane_s16", + "name": "vqdmlslh_lane_s16", "arguments": [ - "int16_t a", + "int32_t a", "int16_t b", "int16x4_t v", "const int lane" ], "return_type": { - "value": "int16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Hd" + "register": "Sd" }, "b": { "register": "Hn" @@ -69385,25 +271891,25 @@ ], "instructions": [ [ - "SQRDMLAH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahh_laneq_s16", + "name": "vqdmlslh_laneq_s16", "arguments": [ - "int16_t a", + "int32_t a", "int16_t b", "int16x8_t v", "const int lane" ], "return_type": { - "value": "int16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Hd" + "register": "Sd" }, "b": { "register": "Hn" @@ -69421,24 +271927,24 @@ ], "instructions": [ [ - "SQRDMLAH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahh_s16", + "name": "vqdmlslh_s16", "arguments": [ - "int16_t a", + "int32_t a", "int16_t b", "int16_t c" ], "return_type": { - "value": "int16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Hd" + "register": "Sd" }, "b": { "register": "Hn" @@ -69452,28 +271958,127 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahq_lane_s16", + "name": "vqdmlsls_lane_s32", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x4_t v", + "int64_t a", + "int32_t b", + "int32x2_t v", "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Dd" }, "b": { - "register": "Vn.8H" + "register": "Sn" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": 
"Vm.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqdmlsls_laneq_s32", + "arguments": [ + "int64_t a", + "int32_t b", + "int32x4_t v", + "const int lane" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dd" + }, + "b": { + "register": "Sn" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqdmlsls_s32", + "arguments": [ + "int64_t a", + "int32_t b", + "int32_t c" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dd" + }, + "b": { + "register": "Sn" + }, + "c": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqdmulh_lane_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t v", + "const int lane" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" }, "lane": { "minimum": 0, @@ -69484,32 +272089,30 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLAH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahq_lane_s32", + "name": "vqdmulh_lane_s32", "arguments": [ - "int32x4_t a", - "int32x4_t b", + "int32x2_t a", "int32x2_t v", "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" + "register": "Vn.2S" }, "lane": { "minimum": 0, @@ -69520,31 +272123,29 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLAH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahq_laneq_s16", + "name": "vqdmulh_laneq_s16", "arguments": [ - "int16x8_t a", - "int16x8_t b", + "int16x4_t 
a", "int16x8_t v", "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { "register": "Vn.4H" }, "lane": { @@ -69560,27 +272161,23 @@ ], "instructions": [ [ - "SQRDMLAH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahq_laneq_s32", + "name": "vqdmulh_laneq_s32", "arguments": [ - "int32x4_t a", - "int32x4_t b", + "int32x2_t a", "int32x4_t v", "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { "register": "Vn.2S" }, "lane": { @@ -69596,97 +272193,147 @@ ], "instructions": [ [ - "SQRDMLAH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahq_s16", + "name": "vqdmulh_n_s16", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x8_t c" + "int16x4_t a", + "int16_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vn.4H" }, "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.8H" + "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLAH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahq_s32", + "name": "vqdmulh_n_s32", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "int32x4_t c" + "int32x2_t a", + "int32_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.2S" }, "b": { - "register": "Vn.4S" + "register": "Vm.S[0]" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqdmulh_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" }, - "c": { - "register": "Vm.4S" + "b": { + "register": 
"Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLAH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahs_lane_s32", + "name": "vqdmulh_s32", "arguments": [ - "int32_t a", - "int32_t b", - "int32x2_t v", - "const int lane" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "int32_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.2S" }, "b": { - "register": "Sn" + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqdmulhh_lane_s16", + "arguments": [ + "int16_t a", + "int16x4_t v", + "const int lane" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" }, "lane": { "minimum": 0, - "maximum": 1 + "maximum": 3 }, "v": { - "register": "Vm.2S" + "register": "Vm.4H" } }, "Architectures": [ @@ -69694,35 +272341,31 @@ ], "instructions": [ [ - "SQRDMLAH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahs_laneq_s32", + "name": "vqdmulhh_laneq_s16", "arguments": [ - "int32_t a", - "int32_t b", - "int32x4_t v", + "int16_t a", + "int16x8_t v", "const int lane" ], "return_type": { - "value": "int32_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" - }, - "b": { - "register": "Sn" + "register": "Hn" }, "lane": { "minimum": 0, - "maximum": 3 + "maximum": 7 }, "v": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ @@ -69730,30 +272373,26 @@ ], "instructions": [ [ - "SQRDMLAH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahs_s32", + "name": "vqdmulhh_s16", "arguments": [ - "int32_t a", - "int32_t b", - "int32_t c" + "int16_t a", + "int16_t b" ], "return_type": { - "value": "int32_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Hn" }, "b": { - "register": "Sn" - }, - "c": { - "register": 
"Sm" + "register": "Hm" } }, "Architectures": [ @@ -69761,28 +272400,24 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlsh_lane_s16", + "name": "vqdmulhq_lane_s16", "arguments": [ - "int16x4_t a", - "int16x4_t b", + "int16x8_t a", "int16x4_t v", "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" + "register": "Vn.8H" }, "lane": { "minimum": 0, @@ -69793,32 +272428,30 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlsh_lane_s32", + "name": "vqdmulhq_lane_s32", "arguments": [ - "int32x2_t a", - "int32x2_t b", + "int32x4_t a", "int32x2_t v", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" + "register": "Vn.4S" }, "lane": { "minimum": 0, @@ -69829,32 +272462,30 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlsh_laneq_s16", + "name": "vqdmulhq_laneq_s16", "arguments": [ - "int16x4_t a", - "int16x4_t b", + "int16x8_t a", "int16x8_t v", "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" + "register": "Vn.8H" }, "lane": { "minimum": 0, @@ -69869,28 +272500,24 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlsh_laneq_s32", + "name": "vqdmulhq_laneq_s32", "arguments": [ - "int32x2_t a", - "int32x2_t b", + "int32x4_t a", "int32x4_t v", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" + 
"register": "Vn.4S" }, "lane": { "minimum": 0, @@ -69905,97 +272532,147 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlsh_s16", + "name": "vqdmulhq_n_s16", "arguments": [ - "int16x4_t a", - "int16x4_t b", - "int16x4_t c" + "int16x8_t a", + "int16_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.8H" }, "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.4H" + "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlsh_s32", + "name": "vqdmulhq_n_s32", "arguments": [ - "int32x2_t a", - "int32x2_t b", - "int32x2_t c" + "int32x4_t a", + "int32_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.4S" }, "b": { - "register": "Vn.2S" + "register": "Vm.S[0]" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqdmulhq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" }, - "c": { - "register": "Vm.2S" + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshh_lane_s16", + "name": "vqdmulhq_s32", "arguments": [ - "int16_t a", - "int16_t b", - "int16x4_t v", - "const int lane" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "int16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hd" + "register": "Vn.4S" }, "b": { - "register": "Hn" + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] 
+ }, + { + "SIMD_ISA": "Neon", + "name": "vqdmulhs_lane_s32", + "arguments": [ + "int32_t a", + "int32x2_t v", + "const int lane" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" }, "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, "v": { - "register": "Vm.4H" + "register": "Vm.2S" } }, "Architectures": [ @@ -70003,35 +272680,31 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshh_laneq_s16", + "name": "vqdmulhs_laneq_s32", "arguments": [ - "int16_t a", - "int16_t b", - "int16x8_t v", + "int32_t a", + "int32x4_t v", "const int lane" ], "return_type": { - "value": "int16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Hd" - }, - "b": { - "register": "Hn" + "register": "Sn" }, "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 }, "v": { - "register": "Vm.8H" + "register": "Vm.4S" } }, "Architectures": [ @@ -70039,30 +272712,26 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshh_s16", + "name": "vqdmulhs_s32", "arguments": [ - "int16_t a", - "int16_t b", - "int16_t c" + "int32_t a", + "int32_t b" ], "return_type": { - "value": "int16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Hd" + "register": "Sn" }, "b": { - "register": "Hn" - }, - "c": { - "register": "Hm" + "register": "Sm" } }, "Architectures": [ @@ -70070,27 +272739,23 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshq_lane_s16", + "name": "vqdmull_high_lane_s16", "arguments": [ "int16x8_t a", - "int16x8_t b", "int16x4_t v", "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { "register": "Vn.8H" }, "lane": { @@ -70106,27 +272771,23 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vqrdmlshq_lane_s32", + "name": "vqdmull_high_lane_s32", "arguments": [ "int32x4_t a", - "int32x4_t b", "int32x2_t v", "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4S" }, "lane": { @@ -70142,28 +272803,24 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshq_laneq_s16", + "name": "vqdmull_high_laneq_s16", "arguments": [ "int16x8_t a", - "int16x8_t b", "int16x8_t v", "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" + "register": "Vn.8H" }, "lane": { "minimum": 0, @@ -70178,28 +272835,24 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshq_laneq_s32", + "name": "vqdmull_high_laneq_s32", "arguments": [ "int32x4_t a", - "int32x4_t b", "int32x4_t v", "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" + "register": "Vn.4S" }, "lane": { "minimum": 0, @@ -70214,30 +272867,26 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshq_s16", + "name": "vqdmull_high_n_s16", "arguments": [ "int16x8_t a", - "int16x8_t b", - "int16x8_t c" + "int16_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { "register": "Vn.8H" }, - "c": { - "register": "Vm.8H" + "b": { + "register": "Vm.H[0]" } }, "Architectures": [ @@ -70245,66 +272894,26 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshq_s32", + "name": "vqdmull_high_n_s32", "arguments": [ "int32x4_t a", - "int32x4_t b", - "int32x4_t c" + "int32_t b" ], "return_type": { - "value": 
"int32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4S" }, - "c": { - "register": "Vm.4S" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "SQRDMLSH" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vqrdmlshs_lane_s32", - "arguments": [ - "int32_t a", - "int32_t b", - "int32x2_t v", - "const int lane" - ], - "return_type": { - "value": "int32_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Sd" - }, "b": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vm.S[0]" } }, "Architectures": [ @@ -70312,35 +272921,26 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshs_laneq_s32", + "name": "vqdmull_high_s16", "arguments": [ - "int32_t a", - "int32_t b", - "int32x4_t v", - "const int lane" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int32_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.8H" }, "b": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ @@ -70348,30 +272948,26 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshs_s32", + "name": "vqdmull_high_s32", "arguments": [ - "int32_t a", - "int32_t b", - "int32_t c" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "int32_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.4S" }, "b": { - "register": "Sn" - }, - "c": { - "register": "Sm" + "register": "Vm.4S" } }, "Architectures": [ @@ -70379,20 +272975,20 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulh_lane_s16", + "name": "vqdmull_lane_s16", "arguments": [ "int16x4_t a", "int16x4_t v", "const int lane" ], "return_type": 
{ - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -70413,20 +273009,20 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulh_lane_s32", + "name": "vqdmull_lane_s32", "arguments": [ "int32x2_t a", "int32x2_t v", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { @@ -70447,20 +273043,20 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulh_laneq_s16", + "name": "vqdmull_laneq_s16", "arguments": [ "int16x4_t a", "int16x8_t v", "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -70479,20 +273075,20 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulh_laneq_s32", + "name": "vqdmull_laneq_s32", "arguments": [ "int32x2_t a", "int32x4_t v", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { @@ -70511,19 +273107,19 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulh_n_s16", + "name": "vqdmull_n_s16", "arguments": [ "int16x4_t a", "int16_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -70540,19 +273136,19 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulh_n_s32", + "name": "vqdmull_n_s32", "arguments": [ "int32x2_t a", "int32_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { @@ -70569,19 +273165,19 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulh_s16", + "name": "vqdmull_s16", "arguments": [ "int16x4_t a", "int16x4_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ 
-70598,19 +273194,19 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulh_s32", + "name": "vqdmull_s32", "arguments": [ "int32x2_t a", "int32x2_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { @@ -70627,20 +273223,20 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhh_lane_s16", + "name": "vqdmullh_lane_s16", "arguments": [ "int16_t a", "int16x4_t v", "const int lane" ], "return_type": { - "value": "int16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { @@ -70659,20 +273255,20 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhh_laneq_s16", + "name": "vqdmullh_laneq_s16", "arguments": [ "int16_t a", "int16x8_t v", "const int lane" ], "return_type": { - "value": "int16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { @@ -70691,19 +273287,19 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhh_s16", + "name": "vqdmullh_s16", "arguments": [ "int16_t a", "int16_t b" ], "return_type": { - "value": "int16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { @@ -70718,99 +273314,117 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhq_lane_s16", + "name": "vqdmulls_lane_s32", "arguments": [ - "int16x8_t a", - "int16x4_t v", + "int32_t a", + "int32x2_t v", "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Sn" }, "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, "v": { - "register": "Vm.4H" + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhq_lane_s32", + "name": "vqdmulls_laneq_s32", "arguments": [ - "int32x4_t a", - 
"int32x2_t v", + "int32_t a", + "int32x4_t v", "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Sn" }, "lane": { "minimum": 0, - "maximum": 1 + "maximum": 3 }, "v": { - "register": "Vm.2S" + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhq_laneq_s16", + "name": "vqdmulls_s32", "arguments": [ - "int16x8_t a", - "int16x8_t v", - "const int lane" + "int32_t a", + "int32_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Sn" }, - "lane": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqmovn_high_s16", + "arguments": [ + "int8x8_t r", + "int16x8_t a" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" }, - "v": { - "register": "Vm.8H" + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -70818,31 +273432,53 @@ ], "instructions": [ [ - "SQRDMULH" + "SQXTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhq_laneq_s32", + "name": "vqmovn_high_s32", "arguments": [ - "int32x4_t a", - "int32x4_t v", - "const int lane" + "int16x4_t r", + "int32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" }, - "lane": { - "minimum": 0, - "maximum": 3 + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqmovn_high_s64", + "arguments": [ + "int32x2_t r", + "int64x2_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" }, - "v": { - 
"register": "Vm.4S" + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -70850,113 +273486,103 @@ ], "instructions": [ [ - "SQRDMULH" + "SQXTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhq_n_s16", + "name": "vqmovn_high_u16", "arguments": [ - "int16x8_t a", - "int16_t b" + "uint8x8_t r", + "uint16x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" }, - "b": { - "register": "Vm.H[0]" + "r": { + "register": "Vd.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRDMULH" + "UQXTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhq_n_s32", + "name": "vqmovn_high_u32", "arguments": [ - "int32x4_t a", - "int32_t b" + "uint16x4_t r", + "uint32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" }, - "b": { - "register": "Vm.S[0]" + "r": { + "register": "Vd.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRDMULH" + "UQXTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhq_s16", + "name": "vqmovn_high_u64", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "uint32x2_t r", + "uint64x2_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2D" }, - "b": { - "register": "Vm.8H" + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRDMULH" + "UQXTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhq_s32", + "name": "vqmovn_s16", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn.8H" } }, "Architectures": [ @@ -70966,117 +273592,97 @@ ], "instructions": [ [ - "SQRDMULH" + "SQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vqrdmulhs_lane_s32", + "name": "vqmovn_s32", "arguments": [ - "int32_t a", - "int32x2_t v", - "const int lane" + "int32x4_t a" ], "return_type": { - "value": "int32_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vn.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMULH" + "SQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhs_laneq_s32", + "name": "vqmovn_s64", "arguments": [ - "int32_t a", - "int32x4_t v", - "const int lane" + "int64x2_t a" ], "return_type": { - "value": "int32_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vn.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMULH" + "SQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhs_s32", + "name": "vqmovn_u16", "arguments": [ - "int32_t a", - "int32_t b" + "uint16x8_t a" ], "return_type": { - "value": "int32_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "b": { - "register": "Sm" + "register": "Vn.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMULH" + "UQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshl_s16", + "name": "vqmovn_u32", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "uint32x4_t a" ], "return_type": { - "value": "int16x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "register": "Vn.4S" } }, "Architectures": [ @@ -71086,26 +273692,22 @@ ], "instructions": [ [ - "SQRSHL" + "UQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshl_s32", + "name": "vqmovn_u64", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "uint64x2_t a" ], "return_type": { - "value": "int32x2_t" + "value": "uint32x2_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" + "register": "Vn.2D" } }, "Architectures": [ @@ -71115,200 +273717,164 @@ ], "instructions": [ [ - "SQRSHL" + "UQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshl_s64", + "name": "vqmovnd_s64", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "int64_t a" ], "return_type": { - "value": "int64x1_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { "register": "Dn" - }, - "b": { - "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHL" + "SQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshl_s8", + "name": "vqmovnd_u64", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint64_t a" ], "return_type": { - "value": "int8x8_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHL" + "UQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshl_u16", + "name": "vqmovnh_s16", "arguments": [ - "uint16x4_t a", - "int16x4_t b" + "int16_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "register": "Hn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQRSHL" + "SQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshl_u32", + "name": "vqmovnh_u16", "arguments": [ - "uint32x2_t a", - "int32x2_t b" + "uint16_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" + "register": "Hn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQRSHL" + "UQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshl_u64", + "name": "vqmovns_s32", "arguments": [ - "uint64x1_t a", - "int64x1_t b" + "int32_t a" ], "return_type": { - "value": 
"uint64x1_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "b": { - "register": "Dm" + "register": "Sn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQRSHL" + "SQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshl_u8", + "name": "vqmovns_u32", "arguments": [ - "uint8x8_t a", - "int8x8_t b" + "uint32_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "register": "Sn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQRSHL" + "UQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlb_s8", + "name": "vqmovun_high_s16", "arguments": [ - "int8_t a", - "int8_t b" + "uint8x8_t r", + "int16x8_t a" ], "return_type": { - "value": "int8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" + "register": "Vn.8H" }, - "b": { - "register": "Bm" + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -71316,26 +273882,26 @@ ], "instructions": [ [ - "SQRSHL" + "SQXTUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlb_u8", + "name": "vqmovun_high_s32", "arguments": [ - "uint8_t a", - "int8_t b" + "uint16x4_t r", + "int32x4_t a" ], "return_type": { - "value": "uint8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" + "register": "Vn.4S" }, - "b": { - "register": "Bm" + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -71343,26 +273909,26 @@ ], "instructions": [ [ - "UQRSHL" + "SQXTUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshld_s64", + "name": "vqmovun_high_s64", "arguments": [ - "int64_t a", - "int64_t b" + "uint32x2_t r", + "int64x2_t a" ], "return_type": { - "value": "int64_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.2D" }, - "b": { - "register": "Dm" + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -71370,194 +273936,166 @@ ], 
"instructions": [ [ - "SQRSHL" + "SQXTUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshld_u64", + "name": "vqmovun_s16", "arguments": [ - "uint64_t a", - "int64_t b" + "int16x8_t a" ], "return_type": { - "value": "uint64_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "b": { - "register": "Dm" + "register": "Vn.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQRSHL" + "SQXTUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlh_s16", + "name": "vqmovun_s32", "arguments": [ - "int16_t a", - "int16_t b" + "int32x4_t a" ], "return_type": { - "value": "int16_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "b": { - "register": "Hm" + "register": "Vn.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRSHL" + "SQXTUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlh_u16", + "name": "vqmovun_s64", "arguments": [ - "uint16_t a", - "int16_t b" + "int64x2_t a" ], "return_type": { - "value": "uint16_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "b": { - "register": "Hm" + "register": "Vn.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQRSHL" + "SQXTUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlq_s16", + "name": "vqmovund_s64", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int64_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHL" + "SQXTUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlq_s32", + "name": "vqmovunh_s16", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int16_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Hn" 
} }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHL" + "SQXTUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlq_s64", + "name": "vqmovuns_s32", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "int32_t a" ], "return_type": { - "value": "int64x2_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Sn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHL" + "SQXTUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlq_s8", + "name": "vqneg_s16", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "int16x4_t a" ], "return_type": { - "value": "int8x16_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "register": "Vn.4H" } }, "Architectures": [ @@ -71567,26 +274105,22 @@ ], "instructions": [ [ - "SQRSHL" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlq_u16", + "name": "vqneg_s32", "arguments": [ - "uint16x8_t a", - "int16x8_t b" + "int32x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vn.2S" } }, "Architectures": [ @@ -71596,55 +274130,45 @@ ], "instructions": [ [ - "UQRSHL" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlq_u32", + "name": "vqneg_s64", "arguments": [ - "uint32x4_t a", - "int32x4_t b" + "int64x1_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQRSHL" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlq_u64", + "name": "vqneg_s8", "arguments": [ - "uint64x2_t a", - "int64x2_t b" + "int8x8_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { 
"a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn.8B" } }, "Architectures": [ @@ -71654,55 +274178,45 @@ ], "instructions": [ [ - "UQRSHL" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlq_u8", + "name": "vqnegb_s8", "arguments": [ - "uint8x16_t a", - "int8x16_t b" + "int8_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "register": "Bn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQRSHL" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshls_s32", + "name": "vqnegd_s64", "arguments": [ - "int32_t a", - "int32_t b" + "int64_t a" ], "return_type": { - "value": "int32_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "b": { - "register": "Sm" + "register": "Dn" } }, "Architectures": [ @@ -71710,26 +274224,22 @@ ], "instructions": [ [ - "SQRSHL" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshls_u32", + "name": "vqnegh_s16", "arguments": [ - "uint32_t a", - "int32_t b" + "int16_t a" ], "return_type": { - "value": "uint32_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "b": { - "register": "Sm" + "register": "Hn" } }, "Architectures": [ @@ -71737,95 +274247,72 @@ ], "instructions": [ [ - "UQRSHL" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_high_n_s16", + "name": "vqnegq_s16", "arguments": [ - "int8x8_t r", - "int16x8_t a", - "const int n" + "int16x8_t a" ], "return_type": { - "value": "int8x16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 8 - }, - "r": { - "register": "Vd.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRSHRN2" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_high_n_s32", + "name": "vqnegq_s32", "arguments": [ - "int16x4_t r", - "int32x4_t a", - "const 
int n" + "int32x4_t a" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 16 - }, - "r": { - "register": "Vd.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRSHRN2" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_high_n_s64", + "name": "vqnegq_s64", "arguments": [ - "int32x2_t r", - "int64x2_t a", - "const int n" + "int64x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 32 - }, - "r": { - "register": "Vd.2S" } }, "Architectures": [ @@ -71833,63 +274320,47 @@ ], "instructions": [ [ - "SQRSHRN2" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_high_n_u16", + "name": "vqnegq_s8", "arguments": [ - "uint8x8_t r", - "uint16x8_t a", - "const int n" + "int8x16_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 8 - }, - "r": { - "register": "Vd.8B" + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQRSHRN2" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_high_n_u32", + "name": "vqnegs_s32", "arguments": [ - "uint16x4_t r", - "uint32x4_t a", - "const int n" + "int32_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 16 - }, - "r": { - "register": "Vd.4H" + "register": "Sn" } }, "Architectures": [ @@ -71897,31 +274368,35 @@ ], "instructions": [ [ - "UQRSHRN2" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_high_n_u64", + "name": "vqrdmlah_lane_s16", "arguments": [ - "uint32x2_t r", - "uint64x2_t a", - "const int n" + "int16x4_t a", + "int16x4_t b", + "int16x4_t v", + "const int lane" ], 
"return_type": { - "value": "uint32x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.4H" }, - "n": { - "minimum": 1, - "maximum": 32 + "b": { + "register": "Vn.4H" }, - "r": { - "register": "Vd.2S" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -71929,207 +274404,241 @@ ], "instructions": [ [ - "UQRSHRN2" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_n_s16", + "name": "vqrdmlah_lane_s32", "arguments": [ - "int16x8_t a", - "const int n" + "int32x2_t a", + "int32x2_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.2S" }, - "n": { - "minimum": 1, - "maximum": 8 + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_n_s32", + "name": "vqrdmlah_laneq_s16", "arguments": [ - "int32x4_t a", - "const int n" + "int16x4_t a", + "int16x4_t b", + "int16x8_t v", + "const int lane" ], "return_type": { "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4H" }, - "n": { - "minimum": 1, - "maximum": 16 + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_n_s64", + "name": "vqrdmlah_laneq_s32", "arguments": [ - "int64x2_t a", - "const int n" + "int32x2_t a", + "int32x2_t b", + "int32x4_t v", + "const int lane" ], "return_type": { "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.2S" }, - "n": { - "minimum": 1, - "maximum": 32 + "b": 
{ + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_n_u16", + "name": "vqrdmlah_s16", "arguments": [ - "uint16x8_t a", - "const int n" + "int16x4_t a", + "int16x4_t b", + "int16x4_t c" ], "return_type": { - "value": "uint8x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.4H" }, - "n": { - "minimum": 1, - "maximum": 8 + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_n_u32", + "name": "vqrdmlah_s32", "arguments": [ - "uint32x4_t a", - "const int n" + "int32x2_t a", + "int32x2_t b", + "int32x2_t c" ], "return_type": { - "value": "uint16x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.2S" }, - "n": { - "minimum": 1, - "maximum": 16 + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_n_u64", + "name": "vqrdmlahh_lane_s16", "arguments": [ - "uint64x2_t a", - "const int n" + "int16_t a", + "int16_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Hd" }, - "n": { - "minimum": 1, - "maximum": 32 + "b": { + "register": "Hn" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrnd_n_s64", + "name": "vqrdmlahh_laneq_s16", "arguments": [ - "int64_t a", - "const 
int n" + "int16_t a", + "int16_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "int32_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Hd" }, - "n": { - "minimum": 1, - "maximum": 32 + "b": { + "register": "Hn" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -72137,27 +274646,30 @@ ], "instructions": [ [ - "SQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrnd_n_u64", + "name": "vqrdmlahh_s16", "arguments": [ - "uint64_t a", - "const int n" + "int16_t a", + "int16_t b", + "int16_t c" ], "return_type": { - "value": "uint32_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Hd" }, - "n": { - "minimum": 1, - "maximum": 32 + "b": { + "register": "Hn" + }, + "c": { + "register": "Hm" } }, "Architectures": [ @@ -72165,27 +274677,35 @@ ], "instructions": [ [ - "UQRSHRN" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrnh_n_s16", + "name": "vqrdmlahq_lane_s16", "arguments": [ - "int16_t a", - "const int n" + "int16x8_t a", + "int16x8_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "int8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.8H" }, - "n": { - "minimum": 1, - "maximum": 8 + "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -72193,27 +274713,35 @@ ], "instructions": [ [ - "SQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrnh_n_u16", + "name": "vqrdmlahq_lane_s32", "arguments": [ - "uint16_t a", - "const int n" + "int32x4_t a", + "int32x4_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.4S" }, - "n": { - "minimum": 1, - "maximum": 8 + "b": { + "register": 
"Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -72221,27 +274749,35 @@ ], "instructions": [ [ - "UQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrns_n_s32", + "name": "vqrdmlahq_laneq_s16", "arguments": [ - "int32_t a", - "const int n" + "int16x8_t a", + "int16x8_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "int16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vd.4H" }, - "n": { - "minimum": 1, - "maximum": 16 + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -72249,27 +274785,35 @@ ], "instructions": [ [ - "SQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrns_n_u32", + "name": "vqrdmlahq_laneq_s32", "arguments": [ - "uint32_t a", - "const int n" + "int32x4_t a", + "int32x4_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "uint16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vd.2S" }, - "n": { - "minimum": 1, - "maximum": 16 + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ @@ -72277,31 +274821,30 @@ ], "instructions": [ [ - "UQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrun_high_n_s16", + "name": "vqrdmlahq_s16", "arguments": [ - "uint8x8_t r", "int16x8_t a", - "const int n" + "int16x8_t b", + "int16x8_t c" ], "return_type": { - "value": "uint8x16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.8H" }, - "n": { - "minimum": 1, - "maximum": 8 + "b": { + "register": "Vn.8H" }, - "r": { - "register": "Vd.8B" + "c": { + "register": "Vm.8H" } }, "Architectures": [ @@ -72309,31 +274852,30 @@ ], "instructions": [ [ - "SQRSHRUN2" + "SQRDMLAH" ] ] }, { "SIMD_ISA": 
"Neon", - "name": "vqrshrun_high_n_s32", + "name": "vqrdmlahq_s32", "arguments": [ - "uint16x4_t r", "int32x4_t a", - "const int n" + "int32x4_t b", + "int32x4_t c" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4S" }, - "n": { - "minimum": 1, - "maximum": 16 + "b": { + "register": "Vn.4S" }, - "r": { - "register": "Vd.4H" + "c": { + "register": "Vm.4S" } }, "Architectures": [ @@ -72341,31 +274883,35 @@ ], "instructions": [ [ - "SQRSHRUN2" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrun_high_n_s64", + "name": "vqrdmlahs_lane_s32", "arguments": [ - "uint32x2_t r", - "int64x2_t a", - "const int n" + "int32_t a", + "int32_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Sd" }, - "n": { - "minimum": 1, - "maximum": 32 + "b": { + "register": "Sn" }, - "r": { - "register": "Vd.2S" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -72373,117 +274919,138 @@ ], "instructions": [ [ - "SQRSHRUN2" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrun_n_s16", + "name": "vqrdmlahs_laneq_s32", "arguments": [ - "int16x8_t a", - "const int n" + "int32_t a", + "int32_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Sd" }, - "n": { - "minimum": 1, - "maximum": 8 + "b": { + "register": "Sn" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHRUN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrun_n_s32", + "name": "vqrdmlahs_s32", "arguments": [ - "int32x4_t a", - "const int n" + "int32_t a", + "int32_t b", + "int32_t c" ], 
"return_type": { - "value": "uint16x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Sd" }, - "n": { - "minimum": 1, - "maximum": 16 + "b": { + "register": "Sn" + }, + "c": { + "register": "Sm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHRUN" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrun_n_s64", + "name": "vqrdmlsh_lane_s16", "arguments": [ - "int64x2_t a", - "const int n" + "int16x4_t a", + "int16x4_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.4H" }, - "n": { - "minimum": 1, - "maximum": 32 + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHRUN" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrund_n_s64", + "name": "vqrdmlsh_lane_s32", "arguments": [ - "int64_t a", - "const int n" + "int32x2_t a", + "int32x2_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint32_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.2S" }, - "n": { - "minimum": 1, - "maximum": 32 + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -72491,27 +275058,35 @@ ], "instructions": [ [ - "SQRSHRUN" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrunh_n_s16", + "name": "vqrdmlsh_laneq_s16", "arguments": [ - "int16_t a", - "const int n" + "int16x4_t a", + "int16x4_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "uint8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.4H" }, - "n": { - "minimum": 1, - "maximum": 8 + "b": { + "register": "Vn.4H" + }, + "lane": { + 
"minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -72519,27 +275094,35 @@ ], "instructions": [ [ - "SQRSHRUN" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshruns_n_s32", + "name": "vqrdmlsh_laneq_s32", "arguments": [ - "int32_t a", - "const int n" + "int32x2_t a", + "int32x2_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "uint16_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vd.2S" }, - "n": { - "minimum": 1, - "maximum": 16 + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ @@ -72547,440 +275130,505 @@ ], "instructions": [ [ - "SQRSHRUN" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_n_s16", + "name": "vqrdmlsh_s16", "arguments": [ "int16x4_t a", - "const int n" + "int16x4_t b", + "int16x4_t c" ], "return_type": { "value": "int16x4_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.4H" + }, + "b": { "register": "Vn.4H" }, - "n": { - "minimum": 0, - "maximum": 15 + "c": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_n_s32", + "name": "vqrdmlsh_s32", "arguments": [ "int32x2_t a", - "const int n" + "int32x2_t b", + "int32x2_t c" ], "return_type": { "value": "int32x2_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.2S" + }, + "b": { "register": "Vn.2S" }, - "n": { - "minimum": 0, - "maximum": 31 + "c": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_n_s64", + "name": "vqrdmlshh_lane_s16", "arguments": [ - "int64x1_t a", - "const int n" + "int16_t a", + "int16_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "int64x1_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - 
"register": "Dn" + "register": "Hd" }, - "n": { + "b": { + "register": "Hn" + }, + "lane": { "minimum": 0, - "maximum": 63 + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_n_s8", + "name": "vqrdmlshh_laneq_s16", "arguments": [ - "int8x8_t a", - "const int n" + "int16_t a", + "int16_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Hd" }, - "n": { + "b": { + "register": "Hn" + }, + "lane": { "minimum": 0, "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_n_u16", + "name": "vqrdmlshh_s16", "arguments": [ - "uint16x4_t a", - "const int n" + "int16_t a", + "int16_t b", + "int16_t c" ], "return_type": { - "value": "uint16x4_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Hd" }, - "n": { - "minimum": 0, - "maximum": 15 + "b": { + "register": "Hn" + }, + "c": { + "register": "Hm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_n_u32", + "name": "vqrdmlshq_lane_s16", "arguments": [ - "uint32x2_t a", - "const int n" + "int16x8_t a", + "int16x8_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.8H" }, - "n": { + "b": { + "register": "Vn.8H" + }, + "lane": { "minimum": 0, - "maximum": 31 + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_n_u64", + "name": 
"vqrdmlshq_lane_s32", "arguments": [ - "uint64x1_t a", - "const int n" + "int32x4_t a", + "int32x4_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" }, - "n": { + "b": { + "register": "Vn.4S" + }, + "lane": { "minimum": 0, - "maximum": 63 + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_n_u8", + "name": "vqrdmlshq_laneq_s16", "arguments": [ - "uint8x8_t a", - "const int n" + "int16x8_t a", + "int16x8_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vd.4H" }, - "n": { + "b": { + "register": "Vn.4H" + }, + "lane": { "minimum": 0, "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_s16", + "name": "vqrdmlshq_laneq_s32", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "int32x4_t a", + "int32x4_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.2S" }, "b": { - "register": "Vm.4H" + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_s32", + "name": "vqrdmlshq_s16", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "int16x8_t a", + "int16x8_t b", + "int16x8_t c" ], "return_type": { - "value": "int32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.8H" }, "b": 
{ - "register": "Vm.2S" + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_s64", + "name": "vqrdmlshq_s32", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "int32x4_t a", + "int32x4_t b", + "int32x4_t c" ], "return_type": { - "value": "int64x1_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" }, "b": { - "register": "Dm" + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_s8", + "name": "vqrdmlshs_lane_s32", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "int32_t a", + "int32_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Sd" }, "b": { - "register": "Vm.8B" + "register": "Sn" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_u16", + "name": "vqrdmlshs_laneq_s32", "arguments": [ - "uint16x4_t a", - "int16x4_t b" + "int32_t a", + "int32_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Sd" }, "b": { - "register": "Vm.4H" + "register": "Sn" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_u32", + "name": "vqrdmlshs_s32", "arguments": [ - "uint32x2_t a", - "int32x2_t b" + "int32_t a", + "int32_t b", + "int32_t c" ], 
"return_type": { - "value": "uint32x2_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Sd" }, "b": { - "register": "Vm.2S" + "register": "Sn" + }, + "c": { + "register": "Sm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_u64", + "name": "vqrdmulh_lane_s16", "arguments": [ - "uint64x1_t a", - "int64x1_t b" + "int16x4_t a", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint64x1_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.4H" }, - "b": { - "register": "Dm" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -72990,26 +275638,31 @@ ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_u8", + "name": "vqrdmulh_lane_s32", "arguments": [ - "uint8x8_t a", - "int8x8_t b" + "int32x2_t a", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2S" }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -73019,27 +275672,31 @@ ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlb_n_s8", + "name": "vqrdmulh_laneq_s16", "arguments": [ - "int8_t a", - "const int n" + "int16x4_t a", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "int8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" + "register": "Vn.4H" }, - "n": { + "lane": { "minimum": 0, "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -73047,27 +275704,31 @@ ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlb_n_u8", + "name": "vqrdmulh_laneq_s32", 
"arguments": [ - "uint8_t a", - "const int n" + "int32x2_t a", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "uint8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" + "register": "Vn.2S" }, - "n": { + "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ @@ -73075,136 +275736,147 @@ ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlb_s8", + "name": "vqrdmulh_n_s16", "arguments": [ - "int8_t a", - "int8_t b" + "int16x4_t a", + "int16_t b" ], "return_type": { - "value": "int8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" + "register": "Vn.4H" }, "b": { - "register": "Bm" + "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlb_u8", + "name": "vqrdmulh_n_s32", "arguments": [ - "uint8_t a", - "int8_t b" + "int32x2_t a", + "int32_t b" ], "return_type": { - "value": "uint8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" + "register": "Vn.2S" }, "b": { - "register": "Bm" + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshld_n_s64", + "name": "vqrdmulh_s16", "arguments": [ - "int64_t a", - "const int n" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int64_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.4H" }, - "n": { - "minimum": 0, - "maximum": 63 + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshld_n_u64", + "name": "vqrdmulh_s32", "arguments": [ - "uint64_t a", - "const int n" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "uint64_t" 
+ "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.2S" }, - "n": { - "minimum": 0, - "maximum": 63 + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshld_s64", + "name": "vqrdmulhh_lane_s16", "arguments": [ - "int64_t a", - "int64_t b" + "int16_t a", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "int64_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Hn" }, - "b": { - "register": "Dm" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -73212,26 +275884,31 @@ ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshld_u64", + "name": "vqrdmulhh_laneq_s16", "arguments": [ - "uint64_t a", - "int64_t b" + "int16_t a", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "uint64_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Hn" }, - "b": { - "register": "Dm" + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -73239,16 +275916,16 @@ ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlh_n_s16", + "name": "vqrdmulhh_s16", "arguments": [ "int16_t a", - "const int n" + "int16_t b" ], "return_type": { "value": "int16_t" @@ -73257,9 +275934,8 @@ "a": { "register": "Hn" }, - "n": { - "minimum": 0, - "maximum": 15 + "b": { + "register": "Hm" } }, "Architectures": [ @@ -73267,81 +275943,99 @@ ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlh_n_u16", + "name": "vqrdmulhq_lane_s16", "arguments": [ - "uint16_t a", - "const int n" + "int16x8_t a", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { 
"a": { - "register": "Hn" + "register": "Vn.8H" }, - "n": { + "lane": { "minimum": 0, - "maximum": 15 + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlh_s16", + "name": "vqrdmulhq_lane_s32", "arguments": [ - "int16_t a", - "int16_t b" + "int32x4_t a", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "int16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.4S" }, - "b": { - "register": "Hm" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlh_u16", + "name": "vqrdmulhq_laneq_s16", "arguments": [ - "uint16_t a", - "int16_t b" + "int16x8_t a", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "uint16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.8H" }, - "b": { - "register": "Hm" + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -73349,57 +276043,58 @@ ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_n_s16", + "name": "vqrdmulhq_laneq_s32", "arguments": [ - "int16x8_t a", - "const int n" + "int32x4_t a", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4S" }, - "n": { + "lane": { "minimum": 0, - "maximum": 15 + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_n_s32", + "name": "vqrdmulhq_n_s16", "arguments": [ - "int32x4_t a", - "const int n" + "int16x8_t a", + 
"int16_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - "maximum": 31 + "b": { + "register": "Vm.H[0]" } }, "Architectures": [ @@ -73409,27 +276104,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_n_s64", + "name": "vqrdmulhq_n_s32", "arguments": [ - "int64x2_t a", - "const int n" + "int32x4_t a", + "int32_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 63 + "b": { + "register": "Vm.S[0]" } }, "Architectures": [ @@ -73439,27 +276133,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_n_s8", + "name": "vqrdmulhq_s16", "arguments": [ - "int8x16_t a", - "const int n" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Vm.8H" } }, "Architectures": [ @@ -73469,27 +276162,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_n_u16", + "name": "vqrdmulhq_s32", "arguments": [ - "uint16x8_t a", - "const int n" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 15 + "b": { + "register": "Vm.4S" } }, "Architectures": [ @@ -73499,116 +276191,117 @@ ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_n_u32", + "name": "vqrdmulhs_lane_s32", "arguments": [ - "uint32x4_t a", - "const int n" + "int32_t a", + "int32x2_t v", + "const int lane" ], "return_type": { - 
"value": "uint32x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Sn" }, - "n": { + "lane": { "minimum": 0, - "maximum": 31 + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_n_u64", + "name": "vqrdmulhs_laneq_s32", "arguments": [ - "uint64x2_t a", - "const int n" + "int32_t a", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Sn" }, - "n": { + "lane": { "minimum": 0, - "maximum": 63 + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_n_u8", + "name": "vqrdmulhs_s32", "arguments": [ - "uint8x16_t a", - "const int n" + "int32_t a", + "int32_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Sn" }, - "n": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Sm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_s16", + "name": "vqrshl_s16", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, "b": { - "register": "Vm.8H" + "register": "Vm.4H" } }, "Architectures": [ @@ -73618,26 +276311,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_s32", + "name": "vqrshl_s32", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int32x2_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" }, "b": { - "register": "Vm.4S" + "register": "Vm.2S" } }, "Architectures": [ @@ -73647,26 +276340,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_s64", + "name": "vqrshl_s64", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Dn" }, "b": { - "register": "Vm.2D" + "register": "Dm" } }, "Architectures": [ @@ -73676,26 +276369,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_s8", + "name": "vqrshl_s8", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8B" }, "b": { - "register": "Vm.16B" + "register": "Vm.8B" } }, "Architectures": [ @@ -73705,26 +276398,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_u16", + "name": "vqrshl_u16", "arguments": [ - "uint16x8_t a", - "int16x8_t b" + "uint16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, "b": { - "register": "Vm.8H" + "register": "Vm.4H" } }, "Architectures": [ @@ -73734,26 +276427,26 @@ ], "instructions": [ [ - "UQSHL" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_u32", + "name": "vqrshl_u32", "arguments": [ - "uint32x4_t a", - "int32x4_t b" + "uint32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" }, "b": { - "register": "Vm.4S" + "register": "Vm.2S" } }, "Architectures": [ @@ -73763,26 +276456,26 @@ 
], "instructions": [ [ - "UQSHL" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_u64", + "name": "vqrshl_u64", "arguments": [ - "uint64x2_t a", - "int64x2_t b" + "uint64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Dn" }, "b": { - "register": "Vm.2D" + "register": "Dm" } }, "Architectures": [ @@ -73792,26 +276485,26 @@ ], "instructions": [ [ - "UQSHL" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_u8", + "name": "vqrshl_u8", "arguments": [ - "uint8x16_t a", - "int8x16_t b" + "uint8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8B" }, "b": { - "register": "Vm.16B" + "register": "Vm.8B" } }, "Architectures": [ @@ -73821,27 +276514,26 @@ ], "instructions": [ [ - "UQSHL" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshls_n_s32", + "name": "vqrshlb_s8", "arguments": [ - "int32_t a", - "const int n" + "int8_t a", + "int8_t b" ], "return_type": { - "value": "int32_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Bn" }, - "n": { - "minimum": 0, - "maximum": 31 + "b": { + "register": "Bm" } }, "Architectures": [ @@ -73849,27 +276541,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshls_n_u32", + "name": "vqrshlb_u8", "arguments": [ - "uint32_t a", - "const int n" + "uint8_t a", + "int8_t b" ], "return_type": { - "value": "uint32_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Bn" }, - "n": { - "minimum": 0, - "maximum": 31 + "b": { + "register": "Bm" } }, "Architectures": [ @@ -73877,26 +276568,26 @@ ], "instructions": [ [ - "UQSHL" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshls_s32", + "name": "vqrshld_s64", "arguments": [ - "int32_t a", - "int32_t b" + "int64_t 
a", + "int64_t b" ], "return_type": { - "value": "int32_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Dn" }, "b": { - "register": "Sm" + "register": "Dm" } }, "Architectures": [ @@ -73904,26 +276595,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshls_u32", + "name": "vqrshld_u64", "arguments": [ - "uint32_t a", - "int32_t b" + "uint64_t a", + "int64_t b" ], "return_type": { - "value": "uint32_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Dn" }, "b": { - "register": "Sm" + "register": "Dm" } }, "Architectures": [ @@ -73931,87 +276622,80 @@ ], "instructions": [ [ - "UQSHL" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlu_n_s16", + "name": "vqrshlh_s16", "arguments": [ - "int16x4_t a", - "const int n" + "int16_t a", + "int16_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Hn" }, - "n": { - "minimum": 0, - "maximum": 15 + "b": { + "register": "Hm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHLU" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlu_n_s32", + "name": "vqrshlh_u16", "arguments": [ - "int32x2_t a", - "const int n" + "uint16_t a", + "int16_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Hn" }, - "n": { - "minimum": 0, - "maximum": 31 + "b": { + "register": "Hm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHLU" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlu_n_s64", + "name": "vqrshlq_s16", "arguments": [ - "int64x1_t a", - "const int n" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "uint64x1_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - 
"maximum": 63 + "b": { + "register": "Vm.8H" } }, "Architectures": [ @@ -74021,27 +276705,26 @@ ], "instructions": [ [ - "SQSHLU" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlu_n_s8", + "name": "vqrshlq_s32", "arguments": [ - "int8x8_t a", - "const int n" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Vm.4S" } }, "Architectures": [ @@ -74051,111 +276734,113 @@ ], "instructions": [ [ - "SQSHLU" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlub_n_s8", + "name": "vqrshlq_s64", "arguments": [ - "int8_t a", - "const int n" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "uint8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" + "register": "Vn.2D" }, - "n": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQSHLU" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlud_n_s64", + "name": "vqrshlq_s8", "arguments": [ - "int64_t a", - "const int n" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "uint64_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.16B" }, - "n": { - "minimum": 0, - "maximum": 63 + "b": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQSHLU" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshluh_n_s16", + "name": "vqrshlq_u16", "arguments": [ - "int16_t a", - "const int n" + "uint16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "uint16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - "maximum": 15 + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], 
"instructions": [ [ - "SQSHLU" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshluq_n_s16", + "name": "vqrshlq_u32", "arguments": [ - "int16x8_t a", - "const int n" + "uint32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 15 + "b": { + "register": "Vm.4S" } }, "Architectures": [ @@ -74165,27 +276850,26 @@ ], "instructions": [ [ - "SQSHLU" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshluq_n_s32", + "name": "vqrshlq_u64", "arguments": [ - "int32x4_t a", - "const int n" + "uint64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2D" }, - "n": { - "minimum": 0, - "maximum": 31 + "b": { + "register": "Vm.2D" } }, "Architectures": [ @@ -74195,27 +276879,26 @@ ], "instructions": [ [ - "SQSHLU" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshluq_n_s64", + "name": "vqrshlq_u8", "arguments": [ - "int64x2_t a", - "const int n" + "uint8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.16B" }, - "n": { - "minimum": 0, - "maximum": 63 + "b": { + "register": "Vm.16B" } }, "Architectures": [ @@ -74225,46 +276908,43 @@ ], "instructions": [ [ - "SQSHLU" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshluq_n_s8", + "name": "vqrshls_s32", "arguments": [ - "int8x16_t a", - "const int n" + "int32_t a", + "int32_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Sn" }, - "n": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Sm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHLU" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vqshlus_n_s32", + "name": "vqrshls_u32", "arguments": [ - "int32_t a", - "const int n" + "uint32_t a", + "int32_t b" ], "return_type": { "value": "uint32_t" @@ -74273,9 +276953,8 @@ "a": { "register": "Sn" }, - "n": { - "minimum": 0, - "maximum": 31 + "b": { + "register": "Sm" } }, "Architectures": [ @@ -74283,13 +276962,13 @@ ], "instructions": [ [ - "SQSHLU" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_high_n_s16", + "name": "vqrshrn_high_n_s16", "arguments": [ "int8x8_t r", "int16x8_t a", @@ -74315,13 +276994,13 @@ ], "instructions": [ [ - "SQSHRN2" + "SQRSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_high_n_s32", + "name": "vqrshrn_high_n_s32", "arguments": [ "int16x4_t r", "int32x4_t a", @@ -74347,13 +277026,13 @@ ], "instructions": [ [ - "SQSHRN2" + "SQRSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_high_n_s64", + "name": "vqrshrn_high_n_s64", "arguments": [ "int32x2_t r", "int64x2_t a", @@ -74379,13 +277058,13 @@ ], "instructions": [ [ - "SQSHRN2" + "SQRSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_high_n_u16", + "name": "vqrshrn_high_n_u16", "arguments": [ "uint8x8_t r", "uint16x8_t a", @@ -74411,13 +277090,13 @@ ], "instructions": [ [ - "UQSHRN2" + "UQRSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_high_n_u32", + "name": "vqrshrn_high_n_u32", "arguments": [ "uint16x4_t r", "uint32x4_t a", @@ -74443,13 +277122,13 @@ ], "instructions": [ [ - "UQSHRN2" + "UQRSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_high_n_u64", + "name": "vqrshrn_high_n_u64", "arguments": [ "uint32x2_t r", "uint64x2_t a", @@ -74475,13 +277154,13 @@ ], "instructions": [ [ - "UQSHRN2" + "UQRSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_n_s16", + "name": "vqrshrn_n_s16", "arguments": [ "int16x8_t a", "const int n" @@ -74505,13 +277184,13 @@ ], "instructions": [ [ - "SQSHRN" + "SQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_n_s32", + "name": "vqrshrn_n_s32", "arguments": [ "int32x4_t a", "const int n" @@ -74535,13 +277214,13 
@@ ], "instructions": [ [ - "SQSHRN" + "SQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_n_s64", + "name": "vqrshrn_n_s64", "arguments": [ "int64x2_t a", "const int n" @@ -74565,13 +277244,13 @@ ], "instructions": [ [ - "SQSHRN" + "SQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_n_u16", + "name": "vqrshrn_n_u16", "arguments": [ "uint16x8_t a", "const int n" @@ -74595,13 +277274,13 @@ ], "instructions": [ [ - "UQSHRN" + "UQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_n_u32", + "name": "vqrshrn_n_u32", "arguments": [ "uint32x4_t a", "const int n" @@ -74625,13 +277304,13 @@ ], "instructions": [ [ - "UQSHRN" + "UQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_n_u64", + "name": "vqrshrn_n_u64", "arguments": [ "uint64x2_t a", "const int n" @@ -74655,13 +277334,13 @@ ], "instructions": [ [ - "UQSHRN" + "UQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrnd_n_s64", + "name": "vqrshrnd_n_s64", "arguments": [ "int64_t a", "const int n" @@ -74683,13 +277362,13 @@ ], "instructions": [ [ - "SQSHRN" + "SQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrnd_n_u64", + "name": "vqrshrnd_n_u64", "arguments": [ "uint64_t a", "const int n" @@ -74711,13 +277390,13 @@ ], "instructions": [ [ - "UQSHRN" + "UQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrnh_n_s16", + "name": "vqrshrnh_n_s16", "arguments": [ "int16_t a", "const int n" @@ -74739,13 +277418,13 @@ ], "instructions": [ [ - "SQSHRN" + "SQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrnh_n_u16", + "name": "vqrshrnh_n_u16", "arguments": [ "uint16_t a", "const int n" @@ -74767,13 +277446,13 @@ ], "instructions": [ [ - "UQSHRN" + "UQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrns_n_s32", + "name": "vqrshrns_n_s32", "arguments": [ "int32_t a", "const int n" @@ -74795,13 +277474,13 @@ ], "instructions": [ [ - "SQSHRN" + "SQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrns_n_u32", + "name": "vqrshrns_n_u32", "arguments": [ "uint32_t a", "const int n" @@ -74823,13 +277502,13 @@ 
], "instructions": [ [ - "UQSHRN" + "UQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrun_high_n_s16", + "name": "vqrshrun_high_n_s16", "arguments": [ "uint8x8_t r", "int16x8_t a", @@ -74855,13 +277534,13 @@ ], "instructions": [ [ - "SQSHRUN2" + "SQRSHRUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrun_high_n_s32", + "name": "vqrshrun_high_n_s32", "arguments": [ "uint16x4_t r", "int32x4_t a", @@ -74887,13 +277566,13 @@ ], "instructions": [ [ - "SQSHRUN2" + "SQRSHRUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrun_high_n_s64", + "name": "vqrshrun_high_n_s64", "arguments": [ "uint32x2_t r", "int64x2_t a", @@ -74919,13 +277598,13 @@ ], "instructions": [ [ - "SQSHRUN2" + "SQRSHRUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrun_n_s16", + "name": "vqrshrun_n_s16", "arguments": [ "int16x8_t a", "const int n" @@ -74949,13 +277628,13 @@ ], "instructions": [ [ - "SQSHRUN" + "SQRSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrun_n_s32", + "name": "vqrshrun_n_s32", "arguments": [ "int32x4_t a", "const int n" @@ -74979,13 +277658,13 @@ ], "instructions": [ [ - "SQSHRUN" + "SQRSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrun_n_s64", + "name": "vqrshrun_n_s64", "arguments": [ "int64x2_t a", "const int n" @@ -75009,13 +277688,13 @@ ], "instructions": [ [ - "SQSHRUN" + "SQRSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrund_n_s64", + "name": "vqrshrund_n_s64", "arguments": [ "int64_t a", "const int n" @@ -75037,13 +277716,13 @@ ], "instructions": [ [ - "SQSHRUN" + "SQRSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrunh_n_s16", + "name": "vqrshrunh_n_s16", "arguments": [ "int16_t a", "const int n" @@ -75065,13 +277744,13 @@ ], "instructions": [ [ - "SQSHRUN" + "SQRSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshruns_n_s32", + "name": "vqrshruns_n_s32", "arguments": [ "int32_t a", "const int n" @@ -75093,13 +277772,253 @@ ], "instructions": [ [ - "SQSHRUN" + "SQRSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsub_s16", + "name": "vqshl_n_s16", + 
"arguments": [ + "int16x4_t a", + "const int n" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "n": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshl_n_s32", + "arguments": [ + "int32x2_t a", + "const int n" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "n": { + "minimum": 0, + "maximum": 31 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshl_n_s64", + "arguments": [ + "int64x1_t a", + "const int n" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 0, + "maximum": 63 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshl_n_s8", + "arguments": [ + "int8x8_t a", + "const int n" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "n": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshl_n_u16", + "arguments": [ + "uint16x4_t a", + "const int n" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "n": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshl_n_u32", + "arguments": [ + "uint32x2_t a", + "const int n" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "n": 
{ + "minimum": 0, + "maximum": 31 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshl_n_u64", + "arguments": [ + "uint64x1_t a", + "const int n" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 0, + "maximum": 63 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshl_n_u8", + "arguments": [ + "uint8x8_t a", + "const int n" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "n": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshl_s16", "arguments": [ "int16x4_t a", "int16x4_t b" @@ -75122,13 +278041,13 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsub_s32", + "name": "vqshl_s32", "arguments": [ "int32x2_t a", "int32x2_t b" @@ -75151,13 +278070,13 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsub_s64", + "name": "vqshl_s64", "arguments": [ "int64x1_t a", "int64x1_t b" @@ -75180,13 +278099,13 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsub_s8", + "name": "vqshl_s8", "arguments": [ "int8x8_t a", "int8x8_t b" @@ -75209,16 +278128,16 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsub_u16", + "name": "vqshl_u16", "arguments": [ "uint16x4_t a", - "uint16x4_t b" + "int16x4_t b" ], "return_type": { "value": "uint16x4_t" @@ -75238,16 +278157,16 @@ ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsub_u32", + "name": "vqshl_u32", "arguments": [ "uint32x2_t a", - "uint32x2_t b" + "int32x2_t b" ], 
"return_type": { "value": "uint32x2_t" @@ -75267,16 +278186,16 @@ ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsub_u64", + "name": "vqshl_u64", "arguments": [ "uint64x1_t a", - "uint64x1_t b" + "int64x1_t b" ], "return_type": { "value": "uint64x1_t" @@ -75296,16 +278215,16 @@ ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsub_u8", + "name": "vqshl_u8", "arguments": [ "uint8x8_t a", - "uint8x8_t b" + "int8x8_t b" ], "return_type": { "value": "uint8x8_t" @@ -75325,13 +278244,69 @@ ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubb_s8", + "name": "vqshlb_n_s8", + "arguments": [ + "int8_t a", + "const int n" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Bn" + }, + "n": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlb_n_u8", + "arguments": [ + "uint8_t a", + "const int n" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Bn" + }, + "n": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlb_s8", "arguments": [ "int8_t a", "int8_t b" @@ -75352,16 +278327,16 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubb_u8", + "name": "vqshlb_u8", "arguments": [ "uint8_t a", - "uint8_t b" + "int8_t b" ], "return_type": { "value": "uint8_t" @@ -75379,13 +278354,69 @@ ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubd_s64", + "name": "vqshld_n_s64", + "arguments": [ + "int64_t a", + "const int n" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 0, + "maximum": 63 + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshld_n_u64", + "arguments": [ + "uint64_t a", + "const int n" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 0, + "maximum": 63 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshld_s64", "arguments": [ "int64_t a", "int64_t b" @@ -75406,94 +278437,390 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshld_u64", + "arguments": [ + "uint64_t a", + "int64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlh_n_s16", + "arguments": [ + "int16_t a", + "const int n" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlh_n_u16", + "arguments": [ + "uint16_t a", + "const int n" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlh_s16", + "arguments": [ + "int16_t a", + "int16_t b" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlh_u16", + 
"arguments": [ + "uint16_t a", + "int16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlq_n_s16", + "arguments": [ + "int16x8_t a", + "const int n" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "n": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlq_n_s32", + "arguments": [ + "int32x4_t a", + "const int n" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "n": { + "minimum": 0, + "maximum": 31 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlq_n_s64", + "arguments": [ + "int64x2_t a", + "const int n" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "n": { + "minimum": 0, + "maximum": 63 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlq_n_s8", + "arguments": [ + "int8x16_t a", + "const int n" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "n": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlq_n_u16", + "arguments": [ + "uint16x8_t a", + "const int n" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "n": { + "minimum": 0, + 
"maximum": 15 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubd_u64", + "name": "vqshlq_n_u32", "arguments": [ - "uint64_t a", - "uint64_t b" + "uint32x4_t a", + "const int n" ], "return_type": { - "value": "uint64_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.4S" }, - "b": { - "register": "Dm" + "n": { + "minimum": 0, + "maximum": 31 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubh_s16", + "name": "vqshlq_n_u64", "arguments": [ - "int16_t a", - "int16_t b" + "uint64x2_t a", + "const int n" ], "return_type": { - "value": "int16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.2D" }, - "b": { - "register": "Hm" + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubh_u16", + "name": "vqshlq_n_u8", "arguments": [ - "uint16_t a", - "uint16_t b" + "uint8x16_t a", + "const int n" ], "return_type": { - "value": "uint16_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.16B" }, - "b": { - "register": "Hm" + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubq_s16", + "name": "vqshlq_s16", "arguments": [ "int16x8_t a", "int16x8_t b" @@ -75516,13 +278843,13 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubq_s32", + "name": "vqshlq_s32", "arguments": [ "int32x4_t a", "int32x4_t b" @@ -75545,13 +278872,13 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubq_s64", + "name": "vqshlq_s64", "arguments": [ "int64x2_t a", "int64x2_t 
b" @@ -75574,13 +278901,13 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubq_s8", + "name": "vqshlq_s8", "arguments": [ "int8x16_t a", "int8x16_t b" @@ -75603,16 +278930,16 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubq_u16", + "name": "vqshlq_u16", "arguments": [ "uint16x8_t a", - "uint16x8_t b" + "int16x8_t b" ], "return_type": { "value": "uint16x8_t" @@ -75632,16 +278959,16 @@ ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubq_u32", + "name": "vqshlq_u32", "arguments": [ "uint32x4_t a", - "uint32x4_t b" + "int32x4_t b" ], "return_type": { "value": "uint32x4_t" @@ -75661,16 +278988,16 @@ ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubq_u64", + "name": "vqshlq_u64", "arguments": [ "uint64x2_t a", - "uint64x2_t b" + "int64x2_t b" ], "return_type": { "value": "uint64x2_t" @@ -75690,16 +279017,16 @@ ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubq_u8", + "name": "vqshlq_u8", "arguments": [ "uint8x16_t a", - "uint8x16_t b" + "int8x16_t b" ], "return_type": { "value": "uint8x16_t" @@ -75719,16 +279046,16 @@ ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubs_s32", + "name": "vqshls_n_s32", "arguments": [ "int32_t a", - "int32_t b" + "const int n" ], "return_type": { "value": "int32_t" @@ -75737,8 +279064,9 @@ "a": { "register": "Sn" }, - "b": { - "register": "Sm" + "n": { + "minimum": 0, + "maximum": 31 } }, "Architectures": [ @@ -75746,20 +279074,48 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubs_u32", + "name": "vqshls_n_u32", "arguments": [ "uint32_t a", - "uint32_t b" + "const int n" ], "return_type": { "value": "uint32_t" }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "n": { + "minimum": 0, + "maximum": 31 + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshls_s32", + "arguments": [ + "int32_t a", + "int32_t b" + ], + "return_type": { + "value": "int32_t" + }, "Arguments_Preparation": { "a": { "register": "Sn" @@ -75773,26 +279129,26 @@ ], "instructions": [ [ - "UQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl1_p8", + "name": "vqshls_u32", "arguments": [ - "poly8x16_t t", - "uint8x8_t idx" + "uint32_t a", + "int32_t b" ], "return_type": { - "value": "poly8x8_t" + "value": "uint32_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Sn" }, - "t": { - "register": "Vn.16B" + "b": { + "register": "Sm" } }, "Architectures": [ @@ -75800,134 +279156,147 @@ ], "instructions": [ [ - "TBL" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl1_s8", + "name": "vqshlu_n_s16", "arguments": [ - "int8x16_t t", - "uint8x8_t idx" + "int16x4_t a", + "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Vn.4H" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 15 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl1_u8", + "name": "vqshlu_n_s32", "arguments": [ - "uint8x16_t t", - "uint8x8_t idx" + "int32x2_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Vn.2S" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 31 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl1q_p8", + "name": "vqshlu_n_s64", "arguments": [ - "poly8x16_t t", - "uint8x16_t idx" + "int64x1_t a", + "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "uint64x1_t" }, 
"Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Dn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl1q_s8", + "name": "vqshlu_n_s8", "arguments": [ - "int8x16_t t", - "uint8x16_t idx" + "int8x8_t a", + "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.8B" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl1q_u8", + "name": "vqshlub_n_s8", "arguments": [ - "uint8x16_t t", - "uint8x16_t idx" + "int8_t a", + "const int n" ], "return_type": { - "value": "uint8x16_t" + "value": "uint8_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Bn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ @@ -75935,26 +279304,27 @@ ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl2_p8", + "name": "vqshlud_n_s64", "arguments": [ - "poly8x16x2_t t", - "uint8x8_t idx" + "int64_t a", + "const int n" ], "return_type": { - "value": "poly8x8_t" + "value": "uint64_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Dn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ @@ -75962,26 +279332,27 @@ ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl2_s8", + "name": "vqshluh_n_s16", "arguments": [ - "int8x16x2_t t", - "uint8x8_t idx" + "int16_t a", + "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "uint16_t" }, "Arguments_Preparation": { - "idx": { - "register": 
"Vm.8B" + "a": { + "register": "Hn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 15 } }, "Architectures": [ @@ -75989,134 +279360,147 @@ ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl2_u8", + "name": "vqshluq_n_s16", "arguments": [ - "uint8x16x2_t t", - "uint8x8_t idx" + "int16x8_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Vn.8H" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 15 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl2q_p8", + "name": "vqshluq_n_s32", "arguments": [ - "poly8x16x2_t t", - "uint8x16_t idx" + "int32x4_t a", + "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.4S" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 31 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl2q_s8", + "name": "vqshluq_n_s64", "arguments": [ - "int8x16x2_t t", - "uint8x16_t idx" + "int64x2_t a", + "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.2D" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl2q_u8", + "name": "vqshluq_n_s8", "arguments": [ - "uint8x16x2_t t", - "uint8x16_t idx" + "int8x16_t a", + "const int n" ], "return_type": { "value": "uint8x16_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" - }, - "t": { + "a": { "register": 
"Vn.16B" + }, + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl3_p8", + "name": "vqshlus_n_s32", "arguments": [ - "poly8x16x3_t t", - "uint8x8_t idx" + "int32_t a", + "const int n" ], "return_type": { - "value": "poly8x8_t" + "value": "uint32_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Sn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 31 } }, "Architectures": [ @@ -76124,26 +279508,31 @@ ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl3_s8", + "name": "vqshrn_high_n_s16", "arguments": [ - "int8x16x3_t t", - "uint8x8_t idx" + "int8x8_t r", + "int16x8_t a", + "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Vn.8H" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 8 + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -76151,26 +279540,31 @@ ], "instructions": [ [ - "TBL" + "SQSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl3_u8", + "name": "vqshrn_high_n_s32", "arguments": [ - "uint8x16x3_t t", - "uint8x8_t idx" + "int16x4_t r", + "int32x4_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Vn.4S" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 16 + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -76178,26 +279572,31 @@ ], "instructions": [ [ - "TBL" + "SQSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl3q_p8", + "name": "vqshrn_high_n_s64", "arguments": [ - "poly8x16x3_t t", - "uint8x16_t idx" + "int32x2_t r", + "int64x2_t a", + "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "int32x4_t" }, 
"Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.2D" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 32 + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -76205,26 +279604,31 @@ ], "instructions": [ [ - "TBL" + "SQSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl3q_s8", + "name": "vqshrn_high_n_u16", "arguments": [ - "int8x16x3_t t", - "uint8x16_t idx" + "uint8x8_t r", + "uint16x8_t a", + "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.8H" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 8 + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -76232,26 +279636,31 @@ ], "instructions": [ [ - "TBL" + "UQSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl3q_u8", + "name": "vqshrn_high_n_u32", "arguments": [ - "uint8x16x3_t t", - "uint8x16_t idx" + "uint16x4_t r", + "uint32x4_t a", + "const int n" ], "return_type": { - "value": "uint8x16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.4S" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 16 + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -76259,26 +279668,31 @@ ], "instructions": [ [ - "TBL" + "UQSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl4_p8", + "name": "vqshrn_high_n_u64", "arguments": [ - "poly8x16x4_t t", - "uint8x8_t idx" + "uint32x2_t r", + "uint64x2_t a", + "const int n" ], "return_type": { - "value": "poly8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Vn.2D" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 32 + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -76286,196 +279700,207 @@ ], "instructions": [ [ - "TBL" + "UQSHRN2" ] ] }, { "SIMD_ISA": "Neon", - 
"name": "vqtbl4_s8", + "name": "vqshrn_n_s16", "arguments": [ - "int8x16x4_t t", - "uint8x8_t idx" + "int16x8_t a", + "const int n" ], "return_type": { "value": "int8x8_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Vn.8H" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl4_u8", + "name": "vqshrn_n_s32", "arguments": [ - "uint8x16x4_t t", - "uint8x8_t idx" + "int32x4_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Vn.4S" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl4q_p8", + "name": "vqshrn_n_s64", "arguments": [ - "poly8x16x4_t t", - "uint8x16_t idx" + "int64x2_t a", + "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "int32x2_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.2D" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl4q_s8", + "name": "vqshrn_n_u16", "arguments": [ - "int8x16x4_t t", - "uint8x16_t idx" + "uint16x8_t a", + "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.8H" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "UQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl4q_u8", + "name": "vqshrn_n_u32", "arguments": [ - 
"uint8x16x4_t t", - "uint8x16_t idx" + "uint32x4_t a", + "const int n" ], "return_type": { - "value": "uint8x16_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.4S" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "UQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx1_p8", + "name": "vqshrn_n_u64", "arguments": [ - "poly8x8_t a", - "poly8x16_t t", - "uint8x8_t idx" + "uint64x2_t a", + "const int n" ], "return_type": { - "value": "poly8x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - "register": "Vm.8B" + "register": "Vn.2D" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "UQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx1_s8", + "name": "vqshrnd_n_s64", "arguments": [ - "int8x8_t a", - "int8x16_t t", - "uint8x8_t idx" + "int64_t a", + "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - "register": "Vm.8B" + "register": "Dn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -76483,30 +279908,27 @@ ], "instructions": [ [ - "TBX" + "SQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx1_u8", + "name": "vqshrnd_n_u64", "arguments": [ - "uint8x8_t a", - "uint8x16_t t", - "uint8x8_t idx" + "uint64_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - "register": "Vm.8B" + "register": "Dn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -76514,30 +279936,27 @@ ], "instructions": [ [ - "TBX" + "UQSHRN" ] ] }, { 
"SIMD_ISA": "Neon", - "name": "vqtbx1q_p8", + "name": "vqshrnh_n_s16", "arguments": [ - "poly8x16_t a", - "poly8x16_t t", - "uint8x16_t idx" + "int16_t a", + "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Hn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -76545,30 +279964,27 @@ ], "instructions": [ [ - "TBX" + "SQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx1q_s8", + "name": "vqshrnh_n_u16", "arguments": [ - "int8x16_t a", - "int8x16_t t", - "uint8x16_t idx" + "uint16_t a", + "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Hn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -76576,30 +279992,27 @@ ], "instructions": [ [ - "TBX" + "UQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx1q_u8", + "name": "vqshrns_n_s32", "arguments": [ - "uint8x16_t a", - "uint8x16_t t", - "uint8x16_t idx" + "int32_t a", + "const int n" ], "return_type": { - "value": "uint8x16_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Sn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -76607,30 +280020,27 @@ ], "instructions": [ [ - "TBX" + "SQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx2_p8", + "name": "vqshrns_n_u32", "arguments": [ - "poly8x8_t a", - "poly8x16x2_t t", - "uint8x8_t idx" + "uint32_t a", + "const int n" ], "return_type": { - "value": "poly8x8_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - "register": "Vm.8B" + "register": "Sn" }, - "t": { - "register": "Vn.16B" + "n": { + 
"minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -76638,30 +280048,31 @@ ], "instructions": [ [ - "TBX" + "UQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx2_s8", + "name": "vqshrun_high_n_s16", "arguments": [ - "int8x8_t a", - "int8x16x2_t t", - "uint8x8_t idx" + "uint8x8_t r", + "int16x8_t a", + "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.8H" }, - "idx": { - "register": "Vm.8B" + "n": { + "minimum": 1, + "maximum": 8 }, - "t": { - "register": "Vn.16B" + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -76669,30 +280080,31 @@ ], "instructions": [ [ - "TBX" + "SQSHRUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx2_u8", + "name": "vqshrun_high_n_s32", "arguments": [ - "uint8x8_t a", - "uint8x16x2_t t", - "uint8x8_t idx" + "uint16x4_t r", + "int32x4_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.4S" }, - "idx": { - "register": "Vm.8B" + "n": { + "minimum": 1, + "maximum": 16 }, - "t": { - "register": "Vn.16B" + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -76700,30 +280112,31 @@ ], "instructions": [ [ - "TBX" + "SQSHRUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx2q_p8", + "name": "vqshrun_high_n_s64", "arguments": [ - "poly8x16_t a", - "poly8x16x2_t t", - "uint8x16_t idx" + "uint32x2_t r", + "int64x2_t a", + "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vn.2D" }, - "idx": { - "register": "Vm.16B" + "n": { + "minimum": 1, + "maximum": 32 }, - "t": { - "register": "Vn.16B" + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -76731,123 +280144,117 @@ ], "instructions": [ [ - "TBX" + "SQSHRUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx2q_s8", + "name": "vqshrun_n_s16", "arguments": [ - 
"int8x16_t a", - "int8x16x2_t t", - "uint8x16_t idx" + "int16x8_t a", + "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Vn.8H" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "SQSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx2q_u8", + "name": "vqshrun_n_s32", "arguments": [ - "uint8x16_t a", - "uint8x16x2_t t", - "uint8x16_t idx" + "int32x4_t a", + "const int n" ], "return_type": { - "value": "uint8x16_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Vn.4S" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "SQSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx3_p8", + "name": "vqshrun_n_s64", "arguments": [ - "poly8x8_t a", - "poly8x16x3_t t", - "uint8x8_t idx" + "int64x2_t a", + "const int n" ], "return_type": { - "value": "poly8x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - "register": "Vm.8B" + "register": "Vn.2D" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "SQSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx3_s8", + "name": "vqshrund_n_s64", "arguments": [ - "int8x8_t a", - "int8x16x3_t t", - "uint8x8_t idx" + "int64_t a", + "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - "register": "Vm.8B" + "register": "Dn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -76855,30 +280262,27 @@ 
], "instructions": [ [ - "TBX" + "SQSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx3_u8", + "name": "vqshrunh_n_s16", "arguments": [ - "uint8x8_t a", - "uint8x16x3_t t", - "uint8x8_t idx" + "int16_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - "register": "Vm.8B" + "register": "Hn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -76886,30 +280290,27 @@ ], "instructions": [ [ - "TBX" + "SQSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx3q_p8", + "name": "vqshruns_n_s32", "arguments": [ - "poly8x16_t a", - "poly8x16x3_t t", - "uint8x16_t idx" + "int32_t a", + "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Sn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -76917,278 +280318,258 @@ ], "instructions": [ [ - "TBX" + "SQSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx3q_s8", + "name": "vqsub_s16", "arguments": [ - "int8x16_t a", - "int8x16x3_t t", - "uint8x16_t idx" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int8x16_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Vn.4H" }, - "t": { - "register": "Vn.16B" + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx3q_u8", + "name": "vqsub_s32", "arguments": [ - "uint8x16_t a", - "uint8x16x3_t t", - "uint8x16_t idx" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": 
"Vn.2S" }, - "t": { - "register": "Vn.16B" + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx4_p8", + "name": "vqsub_s64", "arguments": [ - "poly8x8_t a", - "poly8x16x4_t t", - "uint8x8_t idx" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "poly8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - "register": "Vm.8B" + "register": "Dn" }, - "t": { - "register": "Vn.16B" + "b": { + "register": "Dm" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx4_s8", + "name": "vqsub_s8", "arguments": [ "int8x8_t a", - "int8x16x4_t t", - "uint8x8_t idx" + "int8x8_t b" ], "return_type": { "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.8B" }, - "idx": { + "b": { "register": "Vm.8B" - }, - "t": { - "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx4_u8", + "name": "vqsub_u16", "arguments": [ - "uint8x8_t a", - "uint8x16x4_t t", - "uint8x8_t idx" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - "register": "Vm.8B" + "register": "Vn.4H" }, - "t": { - "register": "Vn.16B" + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx4q_p8", + "name": "vqsub_u32", "arguments": [ - "poly8x16_t a", - "poly8x16x4_t t", - "uint8x16_t idx" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + 
"register": "Vn.2S" }, - "t": { - "register": "Vn.16B" + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx4q_s8", + "name": "vqsub_u64", "arguments": [ - "int8x16_t a", - "int8x16x4_t t", - "uint8x16_t idx" + "uint64x1_t a", + "uint64x1_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Dn" }, - "t": { - "register": "Vn.16B" + "b": { + "register": "Dm" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx4q_u8", + "name": "vqsub_u8", "arguments": [ - "uint8x16_t a", - "uint8x16x4_t t", - "uint8x16_t idx" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Vn.8B" }, - "t": { - "register": "Vn.16B" + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_high_s16", + "name": "vqsubb_s8", "arguments": [ - "int8x8_t r", - "int16x8_t a", - "int16x8_t b" + "int8_t a", + "int8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Bn" }, "b": { - "register": "Vm.8H" - }, - "r": { - "register": "Vd.8B" + "register": "Bm" } }, "Architectures": [ @@ -77196,30 +280577,26 @@ ], "instructions": [ [ - "RADDHN2" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_high_s32", + "name": "vqsubb_u8", "arguments": [ - "int16x4_t r", - "int32x4_t a", - "int32x4_t b" + "uint8_t a", + "uint8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - 
"register": "Vn.4S" + "register": "Bn" }, "b": { - "register": "Vm.4S" - }, - "r": { - "register": "Vd.4H" + "register": "Bm" } }, "Architectures": [ @@ -77227,30 +280604,26 @@ ], "instructions": [ [ - "RADDHN2" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_high_s64", + "name": "vqsubd_s64", "arguments": [ - "int32x2_t r", - "int64x2_t a", - "int64x2_t b" + "int64_t a", + "int64_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Dn" }, "b": { - "register": "Vm.2D" - }, - "r": { - "register": "Vd.2S" + "register": "Dm" } }, "Architectures": [ @@ -77258,30 +280631,26 @@ ], "instructions": [ [ - "RADDHN2" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_high_u16", + "name": "vqsubd_u64", "arguments": [ - "uint8x8_t r", - "uint16x8_t a", - "uint16x8_t b" + "uint64_t a", + "uint64_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Dn" }, "b": { - "register": "Vm.8H" - }, - "r": { - "register": "Vd.8B" + "register": "Dm" } }, "Architectures": [ @@ -77289,30 +280658,26 @@ ], "instructions": [ [ - "RADDHN2" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_high_u32", + "name": "vqsubh_s16", "arguments": [ - "uint16x4_t r", - "uint32x4_t a", - "uint32x4_t b" + "int16_t a", + "int16_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Hn" }, "b": { - "register": "Vm.4S" - }, - "r": { - "register": "Vd.4H" + "register": "Hm" } }, "Architectures": [ @@ -77320,30 +280685,26 @@ ], "instructions": [ [ - "RADDHN2" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_high_u64", + "name": "vqsubh_u16", "arguments": [ - "uint32x2_t r", - "uint64x2_t a", - "uint64x2_t b" + "uint16_t a", + "uint16_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "uint16_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Hn" }, "b": { - "register": "Vm.2D" - }, - "r": { - "register": "Vd.2S" + "register": "Hm" } }, "Architectures": [ @@ -77351,19 +280712,19 @@ ], "instructions": [ [ - "RADDHN2" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_s16", + "name": "vqsubq_s16", "arguments": [ "int16x8_t a", "int16x8_t b" ], "return_type": { - "value": "int8x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -77380,19 +280741,19 @@ ], "instructions": [ [ - "RADDHN" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_s32", + "name": "vqsubq_s32", "arguments": [ "int32x4_t a", "int32x4_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -77409,19 +280770,19 @@ ], "instructions": [ [ - "RADDHN" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_s64", + "name": "vqsubq_s64", "arguments": [ "int64x2_t a", "int64x2_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { @@ -77438,26 +280799,26 @@ ], "instructions": [ [ - "RADDHN" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_u16", + "name": "vqsubq_s8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, "b": { - "register": "Vm.8H" + "register": "Vm.16B" } }, "Architectures": [ @@ -77467,26 +280828,26 @@ ], "instructions": [ [ - "RADDHN" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_u32", + "name": "vqsubq_u16", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8H" }, "b": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ @@ 
-77496,26 +280857,26 @@ ], "instructions": [ [ - "RADDHN" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_u64", + "name": "vqsubq_u32", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4S" }, "b": { - "register": "Vm.2D" + "register": "Vm.4S" } }, "Architectures": [ @@ -77525,13 +280886,13 @@ ], "instructions": [ [ - "RADDHN" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrax1q_u64", + "name": "vqsubq_u64", "arguments": [ "uint64x2_t a", "uint64x2_t b" @@ -77543,52 +280904,66 @@ "a": { "register": "Vn.2D" }, - "b": {} + "b": { + "register": "Vm.2D" + } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "RAX1" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrbit_p8", + "name": "vqsubq_u8", "arguments": [ - "poly8x8_t a" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "poly8x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "RBIT" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrbit_s8", + "name": "vqsubs_s32", "arguments": [ - "int8x8_t a" + "int32_t a", + "int32_t b" ], "return_type": { - "value": "int8x8_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Sn" + }, + "b": { + "register": "Sm" } }, "Architectures": [ @@ -77596,22 +280971,26 @@ ], "instructions": [ [ - "RBIT" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrbit_u8", + "name": "vqsubs_u32", "arguments": [ - "uint8x8_t a" + "uint32_t a", + "uint32_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Sn" + }, + "b": { + "register": "Sm" } }, "Architectures": [ @@ 
-77619,21 +280998,25 @@ ], "instructions": [ [ - "RBIT" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrbitq_p8", + "name": "vqtbl1_p8", "arguments": [ - "poly8x16_t a" + "poly8x16_t t", + "uint8x8_t idx" ], "return_type": { - "value": "poly8x16_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { + "idx": { + "register": "Vm.8B" + }, + "t": { "register": "Vn.16B" } }, @@ -77642,21 +281025,25 @@ ], "instructions": [ [ - "RBIT" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrbitq_s8", + "name": "vqtbl1_s8", "arguments": [ - "int8x16_t a" + "int8x16_t t", + "uint8x8_t idx" ], "return_type": { - "value": "int8x16_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { + "idx": { + "register": "Vm.8B" + }, + "t": { "register": "Vn.16B" } }, @@ -77665,21 +281052,25 @@ ], "instructions": [ [ - "RBIT" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrbitq_u8", + "name": "vqtbl1_u8", "arguments": [ - "uint8x16_t a" + "uint8x16_t t", + "uint8x8_t idx" ], "return_type": { - "value": "uint8x16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { + "idx": { + "register": "Vm.8B" + }, + "t": { "register": "Vn.16B" } }, @@ -77688,71 +281079,80 @@ ], "instructions": [ [ - "RBIT" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpe_f16", + "name": "vqtbl1q_p8", "arguments": [ - "float16x4_t a" + "poly8x16_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "idx": { + "register": "Vm.16B" + }, + "t": { + "register": "Vn.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpe_f32", + "name": "vqtbl1q_s8", "arguments": [ - "float32x2_t a" + "int8x16_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "idx": { + "register": "Vm.16B" + }, + "t": { + "register": 
"Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpe_f64", + "name": "vqtbl1q_u8", "arguments": [ - "float64x1_t a" + "uint8x16_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float64x1_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "idx": { + "register": "Vm.16B" + }, + "t": { + "register": "Vn.16B" } }, "Architectures": [ @@ -77760,47 +281160,59 @@ ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpe_u32", + "name": "vqtbl2_p8", "arguments": [ - "uint32x2_t a" + "poly8x16x2_t t", + "uint8x8_t idx" ], "return_type": { - "value": "uint32x2_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecped_f64", + "name": "vqtbl2_s8", "arguments": [ - "float64_t a" + "int8x16x2_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float64_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ @@ -77808,22 +281220,29 @@ ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpeh_f16", + "name": "vqtbl2_u8", "arguments": [ - "float16_t a" + "uint8x16x2_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ @@ -77831,71 +281250,89 @@ ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, 
{ "SIMD_ISA": "Neon", - "name": "vrecpeq_f16", + "name": "vqtbl2q_p8", "arguments": [ - "float16x8_t a" + "poly8x16x2_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpeq_f32", + "name": "vqtbl2q_s8", "arguments": [ - "float32x4_t a" + "int8x16x2_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpeq_f64", + "name": "vqtbl2q_u8", "arguments": [ - "float64x2_t a" + "uint8x16x2_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float64x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ @@ -77903,47 +281340,65 @@ ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpeq_u32", + "name": "vqtbl3_p8", "arguments": [ - "uint32x4_t a" + "poly8x16x3_t t", + "uint8x8_t idx" ], "return_type": { - "value": "uint32x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URECPE" + "TBL" 
] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpes_f32", + "name": "vqtbl3_s8", "arguments": [ - "float32_t a" + "int8x16x3_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float32_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -77951,83 +281406,98 @@ ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecps_f16", + "name": "vqtbl3_u8", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "uint8x16x3_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "idx": { + "register": "Vm.8B" }, - "b": { - "register": "Vm.4H" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecps_f32", + "name": "vqtbl3q_p8", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "poly8x16x3_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "idx": { + "register": "Vm.16B" }, - "b": { - "register": "Vm.2S" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecps_f64", + "name": "vqtbl3q_s8", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "int8x16x3_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float64x1_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "idx": { + 
"register": "Vm.16B" }, - "b": { - "register": "Dm" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -78035,26 +281505,32 @@ ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpsd_f64", + "name": "vqtbl3q_u8", "arguments": [ - "float64_t a", - "float64_t b" + "uint8x16x3_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float64_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "idx": { + "register": "Vm.16B" }, - "b": { - "register": "Dm" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -78062,26 +281538,35 @@ ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpsh_f16", + "name": "vqtbl4_p8", "arguments": [ - "float16_t a", - "float16_t b" + "poly8x16x4_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "idx": { + "register": "Vm.8B" }, - "b": { - "register": "Hm" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -78089,83 +281574,107 @@ ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpsq_f16", + "name": "vqtbl4_s8", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "int8x16x4_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "idx": { + "register": "Vm.8B" }, - "b": { - "register": "Vm.8H" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + 
"register": "Vn+3.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpsq_f32", + "name": "vqtbl4_u8", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "uint8x16x4_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float32x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "idx": { + "register": "Vm.8B" }, - "b": { - "register": "Vm.4S" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpsq_f64", + "name": "vqtbl4q_p8", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "poly8x16x4_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float64x2_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "idx": { + "register": "Vm.16B" }, - "b": { - "register": "Vm.2D" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -78173,26 +281682,35 @@ ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpss_f32", + "name": "vqtbl4q_s8", "arguments": [ - "float32_t a", - "float32_t b" + "int8x16x4_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "idx": { + "register": "Vm.16B" }, - "b": { - "register": "Sm" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -78200,22 +281718,35 @@ ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, 
{ "SIMD_ISA": "Neon", - "name": "vrecpxd_f64", + "name": "vqtbl4q_u8", "arguments": [ - "float64_t a" + "uint8x16x4_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float64_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -78223,22 +281754,30 @@ ], "instructions": [ [ - "FRECPX" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpxh_f16", + "name": "vqtbx1_p8", "arguments": [ - "float16_t a" + "poly8x8_t a", + "poly8x16_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t": { + "register": "Vn.16B" } }, "Architectures": [ @@ -78246,22 +281785,30 @@ ], "instructions": [ [ - "FRECPX" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpxs_f32", + "name": "vqtbx1_s8", "arguments": [ - "float32_t a" + "int8x8_t a", + "int8x16_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float32_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t": { + "register": "Vn.16B" } }, "Architectures": [ @@ -78269,47 +281816,61 @@ ], "instructions": [ [ - "FRECPX" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_f32", + "name": "vqtbx1_u8", "arguments": [ - "float32x2_t a" + "uint8x8_t a", + "uint8x16_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t": { + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" 
] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_f64", + "name": "vqtbx1q_p8", "arguments": [ - "float64x1_t a" + "poly8x16_t a", + "poly8x16_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t": { + "register": "Vn.16B" } }, "Architectures": [ @@ -78317,321 +281878,450 @@ ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_p16", + "name": "vqtbx1q_s8", "arguments": [ - "poly16x4_t a" + "int8x16_t a", + "int8x16_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t": { + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_p64", + "name": "vqtbx1q_u8", "arguments": [ - "poly64x1_t a" + "uint8x16_t a", + "uint8x16_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t": { + "register": "Vn.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_p8", + "name": "vqtbx2_p8", "arguments": [ - "poly8x8_t a" + "poly8x8_t a", + "poly8x16x2_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - 
"name": "vreinterpret_f16_s16", + "name": "vqtbx2_s8", "arguments": [ - "int16x4_t a" + "int8x8_t a", + "int8x16x2_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_s32", + "name": "vqtbx2_u8", "arguments": [ - "int32x2_t a" + "uint8x8_t a", + "uint8x16x2_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_s64", + "name": "vqtbx2q_p8", "arguments": [ - "int64x1_t a" + "poly8x16_t a", + "poly8x16x2_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_s8", + "name": "vqtbx2q_s8", "arguments": [ - "int8x8_t a" + "int8x16_t a", + "int8x16x2_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": 
{ + "register": "Vn+1.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_u16", + "name": "vqtbx2q_u8", "arguments": [ - "uint16x4_t a" + "uint8x16_t a", + "uint8x16x2_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_u32", + "name": "vqtbx3_p8", "arguments": [ - "uint32x2_t a" + "poly8x8_t a", + "poly8x16x3_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_u64", + "name": "vqtbx3_s8", "arguments": [ - "uint64x1_t a" + "int8x8_t a", + "int8x16x3_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_u8", + "name": "vqtbx3_u8", "arguments": [ - "uint8x8_t a" + "uint8x8_t a", + "uint8x16x3_t t", + "uint8x8_t idx" ], 
"return_type": { - "value": "float16x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_f16", + "name": "vqtbx3q_p8", "arguments": [ - "float16x4_t a" + "poly8x16_t a", + "poly8x16x3_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_f64", + "name": "vqtbx3q_s8", "arguments": [ - "float64x1_t a" + "int8x16_t a", + "int8x16x3_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -78639,272 +282329,400 @@ ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_p16", + "name": "vqtbx3q_u8", "arguments": [ - "poly16x4_t a" + "uint8x16_t a", + "uint8x16x3_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + 
"register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_p8", + "name": "vqtbx4_p8", "arguments": [ - "poly8x8_t a" + "poly8x8_t a", + "poly8x16x4_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_s16", + "name": "vqtbx4_s8", "arguments": [ - "int16x4_t a" + "int8x8_t a", + "int8x16x4_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_s32", + "name": "vqtbx4_u8", "arguments": [ - "int32x2_t a" + "uint8x8_t a", + "uint8x16x4_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" 
+ "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_s64", + "name": "vqtbx4q_p8", "arguments": [ - "int64x1_t a" + "poly8x16_t a", + "poly8x16x4_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_s8", + "name": "vqtbx4q_s8", "arguments": [ - "int8x8_t a" + "int8x16_t a", + "int8x16x4_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_u16", + "name": "vqtbx4q_u8", "arguments": [ - "uint16x4_t a" + "uint8x16_t a", + "uint8x16x4_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_u32", + "name": "vraddhn_high_s16", 
"arguments": [ - "uint32x2_t a" + "int8x8_t r", + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "RADDHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_u64", + "name": "vraddhn_high_s32", "arguments": [ - "uint64x1_t a" + "int16x4_t r", + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "RADDHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_u8", + "name": "vraddhn_high_s64", "arguments": [ - "uint8x8_t a" + "int32x2_t r", + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "RADDHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_f16", + "name": "vraddhn_high_u16", "arguments": [ - "float16x4_t a" + "uint8x8_t r", + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "float64x1_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -78912,22 +282730,30 @@ ], "instructions": [ [ - "NOP" + "RADDHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_f32", + "name": "vraddhn_high_u32", "arguments": [ - "float32x2_t a" + "uint16x4_t r", 
+ "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "float64x1_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -78935,22 +282761,30 @@ ], "instructions": [ [ - "NOP" + "RADDHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_p16", + "name": "vraddhn_high_u64", "arguments": [ - "poly16x4_t a" + "uint32x2_t r", + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "float64x1_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -78958,160 +282792,200 @@ ], "instructions": [ [ - "NOP" + "RADDHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_p64", + "name": "vraddhn_s16", "arguments": [ - "poly64x1_t a" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "float64x1_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "NOP" + "RADDHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_p8", + "name": "vraddhn_s32", "arguments": [ - "poly8x8_t a" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float64x1_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "NOP" + "RADDHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_s16", + "name": "vraddhn_s64", "arguments": [ - "int16x4_t a" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "float64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.2D" 
+ }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "NOP" + "RADDHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_s32", + "name": "vraddhn_u16", "arguments": [ - "int32x2_t a" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "float64x1_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "NOP" + "RADDHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_s64", + "name": "vraddhn_u32", "arguments": [ - "int64x1_t a" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "float64x1_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "NOP" + "RADDHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_s8", + "name": "vraddhn_u64", "arguments": [ - "int8x8_t a" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "float64x1_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "NOP" + "RADDHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_u16", + "name": "vrax1q_u64", "arguments": [ - "uint16x4_t a" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "float64x1_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ @@ -79119,22 +282993,22 @@ ], "instructions": [ [ - "NOP" + "RAX1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_u32", + "name": "vrbit_p8", "arguments": [ - "uint32x2_t a" + "poly8x8_t a" ], "return_type": { - 
"value": "float64x1_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.8B" } }, "Architectures": [ @@ -79142,22 +283016,22 @@ ], "instructions": [ [ - "NOP" + "RBIT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_u64", + "name": "vrbit_s8", "arguments": [ - "uint64x1_t a" + "int8x8_t a" ], "return_type": { - "value": "float64x1_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.8B" } }, "Architectures": [ @@ -79165,22 +283039,22 @@ ], "instructions": [ [ - "NOP" + "RBIT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_u8", + "name": "vrbit_u8", "arguments": [ "uint8x8_t a" ], "return_type": { - "value": "float64x1_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.8B" } }, "Architectures": [ @@ -79188,72 +283062,68 @@ ], "instructions": [ [ - "NOP" + "RBIT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_f16", + "name": "vrbitq_p8", "arguments": [ - "float16x4_t a" + "poly8x16_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "RBIT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_f32", + "name": "vrbitq_s8", "arguments": [ - "float32x2_t a" + "int8x16_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "RBIT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_f64", + "name": "vrbitq_u8", "arguments": [ - "float64x1_t a" + "uint8x16_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.16B" } }, "Architectures": [ @@ 
-79261,22 +283131,22 @@ ], "instructions": [ [ - "NOP" + "RBIT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_p64", + "name": "vrecpe_f16", "arguments": [ - "poly64x1_t a" + "float16x4_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.4H" } }, "Architectures": [ @@ -79285,22 +283155,22 @@ ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_p8", + "name": "vrecpe_f32", "arguments": [ - "poly8x8_t a" + "float32x2_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.2S" } }, "Architectures": [ @@ -79310,47 +283180,45 @@ ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_s16", + "name": "vrecpe_f64", "arguments": [ - "int16x4_t a" + "float64x1_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_s32", + "name": "vrecpe_u32", "arguments": [ - "int32x2_t a" + "uint32x2_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.2S" } }, "Architectures": [ @@ -79360,97 +283228,92 @@ ], "instructions": [ [ - "NOP" + "URECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_s64", + "name": "vrecped_f64", "arguments": [ - "int64x1_t a" + "float64_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vreinterpret_p16_s8", + "name": "vrecpeh_f16", "arguments": [ - "int8x8_t a" + "float16_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Hn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_u16", + "name": "vrecpeq_f16", "arguments": [ - "uint16x4_t a" + "float16x8_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.8H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_u32", + "name": "vrecpeq_f32", "arguments": [ - "uint32x2_t a" + "float32x4_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.4S" } }, "Architectures": [ @@ -79460,47 +283323,45 @@ ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_u64", + "name": "vrecpeq_f64", "arguments": [ - "uint64x1_t a" + "float64x2_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_u8", + "name": "vrecpeq_u32", "arguments": [ - "uint8x8_t a" + "uint32x4_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.4S" } }, "Architectures": [ @@ -79510,46 +283371,49 @@ ], "instructions": [ [ - "NOP" + "URECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_f16", + "name": "vrecpes_f32", "arguments": [ - "float16x4_t a" + "float32_t a" ], "return_type": { - 
"value": "poly64x1_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Sn" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_f32", + "name": "vrecps_f16", "arguments": [ - "float32x2_t a" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "poly64x1_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" } }, "Architectures": [ @@ -79558,117 +283422,136 @@ ], "instructions": [ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_f64", + "name": "vrecps_f32", "arguments": [ - "float64x1_t a" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "poly64x1_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_p16", + "name": "vrecps_f64", "arguments": [ - "poly16x4_t a" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "poly64x1_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Dn" + }, + "b": { + "register": "Dm" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_p8", + "name": "vrecpsd_f64", "arguments": [ - "poly8x8_t a" + "float64_t a", + "float64_t b" ], "return_type": { - "value": "poly64x1_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Dn" + }, + "b": { + "register": "Dm" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_s16", + "name": "vrecpsh_f16", 
"arguments": [ - "int16x4_t a" + "float16_t a", + "float16_t b" ], "return_type": { - "value": "poly64x1_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Hn" + }, + "b": { + "register": "Hm" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_s32", + "name": "vrecpsq_f16", "arguments": [ - "int32x2_t a" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "poly64x1_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" } }, "Architectures": [ @@ -79677,163 +283560,170 @@ ], "instructions": [ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_s8", + "name": "vrecpsq_f32", "arguments": [ - "int8x8_t a" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "poly64x1_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_u16", + "name": "vrecpsq_f64", "arguments": [ - "uint16x4_t a" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "poly64x1_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_u32", + "name": "vrecpss_f32", "arguments": [ - "uint32x2_t a" + "float32_t a", + "float32_t b" ], "return_type": { - "value": "poly64x1_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Sn" + }, + "b": { + "register": "Sm" } }, "Architectures": [ - "A32", "A64" ], "instructions": 
[ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_u64", + "name": "vrecpxd_f64", "arguments": [ - "uint64x1_t a" + "float64_t a" ], "return_type": { - "value": "poly64x1_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Dn" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_u8", + "name": "vrecpxh_f16", "arguments": [ - "uint8x8_t a" + "float16_t a" ], "return_type": { - "value": "poly64x1_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Hn" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_f16", + "name": "vrecpxs_f32", "arguments": [ - "float16x4_t a" + "float32_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Sn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_f32", + "name": "vreinterpret_f16_f32", "arguments": [ "float32x2_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { @@ -79853,12 +283743,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_f64", + "name": "vreinterpret_f16_f64", "arguments": [ "float64x1_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { @@ -79876,12 +283766,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_p16", + "name": "vreinterpret_f16_p16", "arguments": [ "poly16x4_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { @@ -79901,12 +283791,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_p64", + "name": "vreinterpret_f16_p64", "arguments": [ 
"poly64x1_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { @@ -79925,16 +283815,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_s16", + "name": "vreinterpret_f16_p8", "arguments": [ - "int16x4_t a" + "poly8x8_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.8B" } }, "Architectures": [ @@ -79950,16 +283840,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_s32", + "name": "vreinterpret_f16_s16", "arguments": [ - "int32x2_t a" + "int16x4_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.4H" } }, "Architectures": [ @@ -79975,16 +283865,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_s64", + "name": "vreinterpret_f16_s32", "arguments": [ - "int64x1_t a" + "int32x2_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.2S" } }, "Architectures": [ @@ -80000,16 +283890,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_s8", + "name": "vreinterpret_f16_s64", "arguments": [ - "int8x8_t a" + "int64x1_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.1D" } }, "Architectures": [ @@ -80025,16 +283915,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_u16", + "name": "vreinterpret_f16_s8", "arguments": [ - "uint16x4_t a" + "int8x8_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.8B" } }, "Architectures": [ @@ -80050,16 +283940,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_u32", + "name": "vreinterpret_f16_u16", "arguments": [ - "uint32x2_t a" + "uint16x4_t a" ], "return_type": { 
- "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.4H" } }, "Architectures": [ @@ -80075,16 +283965,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_u64", + "name": "vreinterpret_f16_u32", "arguments": [ - "uint64x1_t a" + "uint32x2_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.2S" } }, "Architectures": [ @@ -80100,16 +283990,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_u8", + "name": "vreinterpret_f16_u64", "arguments": [ - "uint8x8_t a" + "uint64x1_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.1D" } }, "Architectures": [ @@ -80125,16 +284015,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_f16", + "name": "vreinterpret_f16_u8", "arguments": [ - "float16x4_t a" + "uint8x8_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.8B" } }, "Architectures": [ @@ -80150,16 +284040,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_f32", + "name": "vreinterpret_f32_f16", "arguments": [ - "float32x2_t a" + "float16x4_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.4H" } }, "Architectures": [ @@ -80175,12 +284065,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_f64", + "name": "vreinterpret_f32_f64", "arguments": [ "float64x1_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -80198,12 +284088,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_p16", + "name": "vreinterpret_f32_p16", "arguments": [ "poly16x4_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, 
"Arguments_Preparation": { "a": { @@ -80223,19 +284113,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_p64", + "name": "vreinterpret_f32_p8", "arguments": [ - "poly64x1_t a" + "poly8x8_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -80247,16 +284138,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_p8", + "name": "vreinterpret_f32_s16", "arguments": [ - "poly8x8_t a" + "int16x4_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.4H" } }, "Architectures": [ @@ -80272,12 +284163,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_s32", + "name": "vreinterpret_f32_s32", "arguments": [ "int32x2_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -80297,12 +284188,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_s64", + "name": "vreinterpret_f32_s64", "arguments": [ "int64x1_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -80322,12 +284213,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_s8", + "name": "vreinterpret_f32_s8", "arguments": [ "int8x8_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -80347,12 +284238,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_u16", + "name": "vreinterpret_f32_u16", "arguments": [ "uint16x4_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -80372,12 +284263,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_u32", + "name": "vreinterpret_f32_u32", "arguments": [ "uint32x2_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { 
"a": { @@ -80397,12 +284288,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_u64", + "name": "vreinterpret_f32_u64", "arguments": [ "uint64x1_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -80422,12 +284313,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_u8", + "name": "vreinterpret_f32_u8", "arguments": [ "uint8x8_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -80447,12 +284338,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_f16", + "name": "vreinterpret_f64_f16", "arguments": [ "float16x4_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { @@ -80460,8 +284351,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80472,12 +284361,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_f32", + "name": "vreinterpret_f64_f32", "arguments": [ "float32x2_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { @@ -80485,8 +284374,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80497,16 +284384,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_f64", + "name": "vreinterpret_f64_p16", "arguments": [ - "float64x1_t a" + "poly16x4_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.4H" } }, "Architectures": [ @@ -80520,21 +284407,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_p16", + "name": "vreinterpret_f64_p64", "arguments": [ - "poly16x4_t a" + "poly64x1_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80545,20 +284430,19 @@ }, { "SIMD_ISA": "Neon", - 
"name": "vreinterpret_s32_p64", + "name": "vreinterpret_f64_p8", "arguments": [ - "poly64x1_t a" + "poly8x8_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -80569,21 +284453,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_p8", + "name": "vreinterpret_f64_s16", "arguments": [ - "poly8x8_t a" + "int16x4_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80594,21 +284476,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_s16", + "name": "vreinterpret_f64_s32", "arguments": [ - "int16x4_t a" + "int32x2_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80619,12 +284499,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_s64", + "name": "vreinterpret_f64_s64", "arguments": [ "int64x1_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { @@ -80632,8 +284512,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80644,12 +284522,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_s8", + "name": "vreinterpret_f64_s8", "arguments": [ "int8x8_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { @@ -80657,8 +284535,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80669,12 +284545,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_u16", + "name": "vreinterpret_f64_u16", "arguments": [ "uint16x4_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" 
}, "Arguments_Preparation": { "a": { @@ -80682,8 +284558,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80694,12 +284568,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_u32", + "name": "vreinterpret_f64_u32", "arguments": [ "uint32x2_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { @@ -80707,8 +284581,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80719,12 +284591,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_u64", + "name": "vreinterpret_f64_u64", "arguments": [ "uint64x1_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { @@ -80732,8 +284604,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80744,12 +284614,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_u8", + "name": "vreinterpret_f64_u8", "arguments": [ "uint8x8_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { @@ -80757,8 +284627,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80769,12 +284637,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_f16", + "name": "vreinterpret_p16_f16", "arguments": [ "float16x4_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { @@ -80794,12 +284662,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_f32", + "name": "vreinterpret_p16_f32", "arguments": [ "float32x2_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { @@ -80819,12 +284687,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_f64", + "name": "vreinterpret_p16_f64", "arguments": [ "float64x1_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { @@ -80842,20 +284710,19 @@ }, { "SIMD_ISA": "Neon", - "name": 
"vreinterpret_s64_p16", + "name": "vreinterpret_p16_p64", "arguments": [ - "poly16x4_t a" + "poly64x1_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -80867,19 +284734,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_p64", + "name": "vreinterpret_p16_p8", "arguments": [ - "poly64x1_t a" + "poly8x8_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -80891,16 +284759,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_p8", + "name": "vreinterpret_p16_s16", "arguments": [ - "poly8x8_t a" + "int16x4_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.4H" } }, "Architectures": [ @@ -80916,16 +284784,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_s16", + "name": "vreinterpret_p16_s32", "arguments": [ - "int16x4_t a" + "int32x2_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.2S" } }, "Architectures": [ @@ -80941,16 +284809,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_s32", + "name": "vreinterpret_p16_s64", "arguments": [ - "int32x2_t a" + "int64x1_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.1D" } }, "Architectures": [ @@ -80966,12 +284834,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_s8", + "name": "vreinterpret_p16_s8", "arguments": [ "int8x8_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { @@ -80991,12 +284859,12 @@ }, { "SIMD_ISA": "Neon", - "name": 
"vreinterpret_s64_u16", + "name": "vreinterpret_p16_u16", "arguments": [ "uint16x4_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { @@ -81016,12 +284884,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_u32", + "name": "vreinterpret_p16_u32", "arguments": [ "uint32x2_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { @@ -81041,12 +284909,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_u64", + "name": "vreinterpret_p16_u64", "arguments": [ "uint64x1_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { @@ -81066,12 +284934,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_u8", + "name": "vreinterpret_p16_u8", "arguments": [ "uint8x8_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { @@ -81091,12 +284959,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_f16", + "name": "vreinterpret_p64_f16", "arguments": [ "float16x4_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81104,7 +284972,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81116,12 +284983,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_f32", + "name": "vreinterpret_p64_f32", "arguments": [ "float32x2_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81129,7 +284996,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81141,12 +285007,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_f64", + "name": "vreinterpret_p64_f64", "arguments": [ "float64x1_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81164,43 +285030,18 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_p16", + "name": "vreinterpret_p64_p16", "arguments": [ 
"poly16x4_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { "register": "Vd.4H" } }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_p64", - "arguments": [ - "poly64x1_t a" - ], - "return_type": { - "value": "int8x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.1D" - } - }, "Architectures": [ "A32", "A64" @@ -81213,12 +285054,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_p8", + "name": "vreinterpret_p64_p8", "arguments": [ "poly8x8_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81226,7 +285067,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81238,12 +285078,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_s16", + "name": "vreinterpret_p64_s16", "arguments": [ "int16x4_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81251,7 +285091,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81263,12 +285102,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_s32", + "name": "vreinterpret_p64_s32", "arguments": [ "int32x2_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81276,7 +285115,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81288,20 +285126,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_s64", + "name": "vreinterpret_p64_s8", "arguments": [ - "int64x1_t a" + "int8x8_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81313,12 +285150,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_u16", + "name": "vreinterpret_p64_u16", "arguments": [ "uint16x4_t a" ], "return_type": { - "value": 
"int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81326,7 +285163,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81338,12 +285174,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_u32", + "name": "vreinterpret_p64_u32", "arguments": [ "uint32x2_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81351,7 +285187,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81363,12 +285198,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_u64", + "name": "vreinterpret_p64_u64", "arguments": [ "uint64x1_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81376,7 +285211,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81388,12 +285222,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_u8", + "name": "vreinterpret_p64_u8", "arguments": [ "uint8x8_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81401,7 +285235,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81413,12 +285246,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_f16", + "name": "vreinterpret_p8_f16", "arguments": [ "float16x4_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { @@ -81438,12 +285271,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_f32", + "name": "vreinterpret_p8_f32", "arguments": [ "float32x2_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { @@ -81463,12 +285296,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_f64", + "name": "vreinterpret_p8_f64", "arguments": [ "float64x1_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { @@ -81486,12 +285319,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_p16", + "name": "vreinterpret_p8_p16", 
"arguments": [ "poly16x4_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { @@ -81511,12 +285344,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_p64", + "name": "vreinterpret_p8_p64", "arguments": [ "poly64x1_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { @@ -81535,16 +285368,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_p8", + "name": "vreinterpret_p8_s16", "arguments": [ - "poly8x8_t a" + "int16x4_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.4H" } }, "Architectures": [ @@ -81560,16 +285393,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_s16", + "name": "vreinterpret_p8_s32", "arguments": [ - "int16x4_t a" + "int32x2_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.2S" } }, "Architectures": [ @@ -81585,16 +285418,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_s32", + "name": "vreinterpret_p8_s64", "arguments": [ - "int32x2_t a" + "int64x1_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.1D" } }, "Architectures": [ @@ -81610,16 +285443,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_s64", + "name": "vreinterpret_p8_s8", "arguments": [ - "int64x1_t a" + "int8x8_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" } }, "Architectures": [ @@ -81635,16 +285468,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_s8", + "name": "vreinterpret_p8_u16", "arguments": [ - "int8x8_t a" + "uint16x4_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { 
"a": { - "register": "Vd.8B" + "register": "Vd.4H" } }, "Architectures": [ @@ -81660,12 +285493,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_u32", + "name": "vreinterpret_p8_u32", "arguments": [ "uint32x2_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { @@ -81685,12 +285518,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_u64", + "name": "vreinterpret_p8_u64", "arguments": [ "uint64x1_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { @@ -81710,12 +285543,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_u8", + "name": "vreinterpret_p8_u8", "arguments": [ "uint8x8_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { @@ -81735,12 +285568,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_f16", + "name": "vreinterpret_s16_f16", "arguments": [ "float16x4_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -81760,12 +285593,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_f32", + "name": "vreinterpret_s16_f32", "arguments": [ "float32x2_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -81785,12 +285618,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_f64", + "name": "vreinterpret_s16_f64", "arguments": [ "float64x1_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -81808,12 +285641,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_p16", + "name": "vreinterpret_s16_p16", "arguments": [ "poly16x4_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -81833,12 +285666,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_p64", + "name": "vreinterpret_s16_p64", "arguments": [ "poly64x1_t a" 
], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -81857,12 +285690,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_p8", + "name": "vreinterpret_s16_p8", "arguments": [ "poly8x8_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -81882,16 +285715,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_s16", + "name": "vreinterpret_s16_s32", "arguments": [ - "int16x4_t a" + "int32x2_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.2S" } }, "Architectures": [ @@ -81907,16 +285740,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_s32", + "name": "vreinterpret_s16_s64", "arguments": [ - "int32x2_t a" + "int64x1_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.1D" } }, "Architectures": [ @@ -81932,16 +285765,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_s64", + "name": "vreinterpret_s16_s8", "arguments": [ - "int64x1_t a" + "int8x8_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" } }, "Architectures": [ @@ -81957,16 +285790,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_s8", + "name": "vreinterpret_s16_u16", "arguments": [ - "int8x8_t a" + "uint16x4_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.4H" } }, "Architectures": [ @@ -81982,16 +285815,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_u16", + "name": "vreinterpret_s16_u32", "arguments": [ - "uint16x4_t a" + "uint32x2_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": 
"Vd.4H" + "register": "Vd.2S" } }, "Architectures": [ @@ -82007,12 +285840,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_u64", + "name": "vreinterpret_s16_u64", "arguments": [ "uint64x1_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -82032,12 +285865,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_u8", + "name": "vreinterpret_s16_u8", "arguments": [ "uint8x8_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -82057,12 +285890,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_f16", + "name": "vreinterpret_s32_f16", "arguments": [ "float16x4_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -82082,12 +285915,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_f32", + "name": "vreinterpret_s32_f32", "arguments": [ "float32x2_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -82107,12 +285940,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_f64", + "name": "vreinterpret_s32_f64", "arguments": [ "float64x1_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -82130,12 +285963,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_p16", + "name": "vreinterpret_s32_p16", "arguments": [ "poly16x4_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -82155,12 +285988,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_p64", + "name": "vreinterpret_s32_p64", "arguments": [ "poly64x1_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -82179,12 +286012,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_p8", + "name": "vreinterpret_s32_p8", "arguments": [ "poly8x8_t a" ], "return_type": { 
- "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -82204,12 +286037,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_s16", + "name": "vreinterpret_s32_s16", "arguments": [ "int16x4_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -82229,16 +286062,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_s32", + "name": "vreinterpret_s32_s64", "arguments": [ - "int32x2_t a" + "int64x1_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.1D" } }, "Architectures": [ @@ -82254,16 +286087,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_s64", + "name": "vreinterpret_s32_s8", "arguments": [ - "int64x1_t a" + "int8x8_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" } }, "Architectures": [ @@ -82279,16 +286112,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_s8", + "name": "vreinterpret_s32_u16", "arguments": [ - "int8x8_t a" + "uint16x4_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.4H" } }, "Architectures": [ @@ -82304,16 +286137,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_u16", + "name": "vreinterpret_s32_u32", "arguments": [ - "uint16x4_t a" + "uint32x2_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.2S" } }, "Architectures": [ @@ -82329,16 +286162,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_u32", + "name": "vreinterpret_s32_u64", "arguments": [ - "uint32x2_t a" + "uint64x1_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + 
"register": "Vd.1D" } }, "Architectures": [ @@ -82354,12 +286187,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_u8", + "name": "vreinterpret_s32_u8", "arguments": [ "uint8x8_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -82379,12 +286212,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_f16", + "name": "vreinterpret_s64_f16", "arguments": [ "float16x4_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82404,12 +286237,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_f32", + "name": "vreinterpret_s64_f32", "arguments": [ "float32x2_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82429,12 +286262,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_f64", + "name": "vreinterpret_s64_f64", "arguments": [ "float64x1_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82452,12 +286285,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_p16", + "name": "vreinterpret_s64_p16", "arguments": [ "poly16x4_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82477,12 +286310,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_p64", + "name": "vreinterpret_s64_p64", "arguments": [ "poly64x1_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82501,12 +286334,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_p8", + "name": "vreinterpret_s64_p8", "arguments": [ "poly8x8_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82526,12 +286359,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_s16", + "name": "vreinterpret_s64_s16", "arguments": [ "int16x4_t a" ], "return_type": { - "value": "uint8x8_t" + 
"value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82551,12 +286384,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_s32", + "name": "vreinterpret_s64_s32", "arguments": [ "int32x2_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82576,37 +286409,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_s64", - "arguments": [ - "int64x1_t a" - ], - "return_type": { - "value": "uint8x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.1D" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_s8", + "name": "vreinterpret_s64_s8", "arguments": [ "int8x8_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82626,12 +286434,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_u16", + "name": "vreinterpret_s64_u16", "arguments": [ "uint16x4_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82651,12 +286459,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_u32", + "name": "vreinterpret_s64_u32", "arguments": [ "uint32x2_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82676,12 +286484,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_u64", + "name": "vreinterpret_s64_u64", "arguments": [ "uint64x1_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82701,16 +286509,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_f32", + "name": "vreinterpret_s64_u8", "arguments": [ - "float32x4_t a" + "uint8x8_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.8B" } }, "Architectures": [ @@ -82726,42 +286534,20 @@ }, { 
"SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_f64", - "arguments": [ - "float64x2_t a" - ], - "return_type": { - "value": "float16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_p128", + "name": "vreinterpret_s8_f16", "arguments": [ - "poly128_t a" + "float16x4_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1Q" + "register": "Vd.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -82773,16 +286559,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_p16", + "name": "vreinterpret_s8_f32", "arguments": [ - "poly16x8_t a" + "float32x2_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2S" } }, "Architectures": [ @@ -82798,20 +286584,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_p64", + "name": "vreinterpret_s8_f64", "arguments": [ - "poly64x2_t a" + "float64x1_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.1D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -82822,16 +286607,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_p8", + "name": "vreinterpret_s8_p16", "arguments": [ - "poly8x16_t a" + "poly16x4_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4H" } }, "Architectures": [ @@ -82847,20 +286632,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_s16", + "name": "vreinterpret_s8_p64", "arguments": [ - "int16x8_t a" + "poly64x1_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + 
"register": "Vd.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -82872,16 +286656,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_s32", + "name": "vreinterpret_s8_p8", "arguments": [ - "int32x4_t a" + "poly8x8_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.8B" } }, "Architectures": [ @@ -82897,16 +286681,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_s64", + "name": "vreinterpret_s8_s16", "arguments": [ - "int64x2_t a" + "int16x4_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.4H" } }, "Architectures": [ @@ -82922,16 +286706,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_s8", + "name": "vreinterpret_s8_s32", "arguments": [ - "int8x16_t a" + "int32x2_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.2S" } }, "Architectures": [ @@ -82947,16 +286731,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_u16", + "name": "vreinterpret_s8_s64", "arguments": [ - "uint16x8_t a" + "int64x1_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.1D" } }, "Architectures": [ @@ -82972,16 +286756,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_u32", + "name": "vreinterpret_s8_u16", "arguments": [ - "uint32x4_t a" + "uint16x4_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.4H" } }, "Architectures": [ @@ -82997,16 +286781,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_u64", + "name": "vreinterpret_s8_u32", "arguments": [ - "uint64x2_t a" + "uint32x2_t a" ], "return_type": { - "value": "float16x8_t" + "value": 
"int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.2S" } }, "Architectures": [ @@ -83022,16 +286806,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_u8", + "name": "vreinterpret_s8_u64", "arguments": [ - "uint8x16_t a" + "uint64x1_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.1D" } }, "Architectures": [ @@ -83047,16 +286831,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_f16", + "name": "vreinterpret_s8_u8", "arguments": [ - "float16x8_t a" + "uint8x8_t a" ], "return_type": { - "value": "float32x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.8B" } }, "Architectures": [ @@ -83072,39 +286856,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_f64", - "arguments": [ - "float64x2_t a" - ], - "return_type": { - "value": "float32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_p16", + "name": "vreinterpret_u16_f16", "arguments": [ - "poly16x8_t a" + "float16x4_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4H" } }, "Architectures": [ @@ -83120,16 +286881,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_p8", + "name": "vreinterpret_u16_f32", "arguments": [ - "poly8x16_t a" + "float32x2_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.2S" } }, "Architectures": [ @@ -83145,21 +286906,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_s16", + "name": "vreinterpret_u16_f64", "arguments": [ - "int16x8_t a" + "float64x1_t a" ], "return_type": { - "value": "float32x4_t" 
+ "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -83170,16 +286929,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_s32", + "name": "vreinterpret_u16_p16", "arguments": [ - "int32x4_t a" + "poly16x4_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.4H" } }, "Architectures": [ @@ -83195,20 +286954,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_s64", + "name": "vreinterpret_u16_p64", "arguments": [ - "int64x2_t a" + "poly64x1_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -83220,16 +286978,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_s8", + "name": "vreinterpret_u16_p8", "arguments": [ - "int8x16_t a" + "poly8x8_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8B" } }, "Architectures": [ @@ -83245,16 +287003,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_u16", + "name": "vreinterpret_u16_s16", "arguments": [ - "uint16x8_t a" + "int16x4_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4H" } }, "Architectures": [ @@ -83270,16 +287028,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_u32", + "name": "vreinterpret_u16_s32", "arguments": [ - "uint32x4_t a" + "int32x2_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.2S" } }, "Architectures": [ @@ -83295,16 +287053,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_u64", + 
"name": "vreinterpret_u16_s64", "arguments": [ - "uint64x2_t a" + "int64x1_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.1D" } }, "Architectures": [ @@ -83320,16 +287078,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_u8", + "name": "vreinterpret_u16_s8", "arguments": [ - "uint8x16_t a" + "int8x8_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8B" } }, "Architectures": [ @@ -83345,19 +287103,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_f16", + "name": "vreinterpret_u16_u32", "arguments": [ - "float16x8_t a" + "uint32x2_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83368,19 +287128,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_f32", + "name": "vreinterpret_u16_u64", "arguments": [ - "float32x4_t a" + "uint64x1_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83391,19 +287153,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_p128", + "name": "vreinterpret_u16_u8", "arguments": [ - "poly128_t a" + "uint8x8_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1Q" + "register": "Vd.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83414,19 +287178,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_p16", + "name": "vreinterpret_u32_f16", "arguments": [ - "poly16x8_t a" + "float16x4_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" 
}, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83437,19 +287203,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_p64", + "name": "vreinterpret_u32_f32", "arguments": [ - "poly64x2_t a" + "float32x2_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83460,16 +287228,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_p8", + "name": "vreinterpret_u32_f64", "arguments": [ - "poly8x16_t a" + "float64x1_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.1D" } }, "Architectures": [ @@ -83483,19 +287251,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_s16", + "name": "vreinterpret_u32_p16", "arguments": [ - "int16x8_t a" + "poly16x4_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83506,19 +287276,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_s32", + "name": "vreinterpret_u32_p64", "arguments": [ - "int32x4_t a" + "poly64x1_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.1D" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ @@ -83529,19 +287300,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_s64", + "name": "vreinterpret_u32_p8", "arguments": [ - "int64x2_t a" + "poly8x8_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8B" } }, "Architectures": [ + "v7", + 
"A32", "A64" ], "instructions": [ @@ -83552,19 +287325,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_s8", + "name": "vreinterpret_u32_s16", "arguments": [ - "int8x16_t a" + "int16x4_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83575,19 +287350,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_u16", + "name": "vreinterpret_u32_s32", "arguments": [ - "uint16x8_t a" + "int32x2_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83598,19 +287375,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_u32", + "name": "vreinterpret_u32_s64", "arguments": [ - "uint32x4_t a" + "int64x1_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83621,16 +287400,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_u64", + "name": "vreinterpret_u32_s8", "arguments": [ - "uint64x2_t a" + "int8x8_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8B" } }, "Architectures": [ @@ -83646,19 +287425,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_u8", + "name": "vreinterpret_u32_u16", "arguments": [ - "uint8x16_t a" + "uint16x4_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83669,19 +287450,20 @@ }, { "SIMD_ISA": "Neon", - "name": 
"vreinterpretq_p128_f16", + "name": "vreinterpret_u32_u64", "arguments": [ - "float16x8_t a" + "uint64x1_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.1D" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83693,19 +287475,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_f32", + "name": "vreinterpret_u32_u8", "arguments": [ - "float32x4_t a" + "uint8x8_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83717,19 +287500,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_f64", + "name": "vreinterpret_u64_f16", "arguments": [ - "float64x2_t a" + "float16x4_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1Q" + "register": "Vd.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83740,19 +287525,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_p16", + "name": "vreinterpret_u64_f32", "arguments": [ - "poly16x8_t a" + "float32x2_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83764,20 +287550,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_p8", + "name": "vreinterpret_u64_f64", "arguments": [ - "poly8x16_t a" + "float64x1_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.1D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -83788,19 +287573,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_s16", + "name": "vreinterpret_u64_p16", "arguments": [ - "int16x8_t a" + "poly16x4_t a" ], "return_type": { 
- "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83812,16 +287598,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_s32", + "name": "vreinterpret_u64_p64", "arguments": [ - "int32x4_t a" + "poly64x1_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.1D" } }, "Architectures": [ @@ -83836,19 +287622,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_s64", + "name": "vreinterpret_u64_p8", "arguments": [ - "int64x2_t a" + "poly8x8_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1Q" + "register": "Vd.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83860,19 +287647,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_s8", + "name": "vreinterpret_u64_s16", "arguments": [ - "int8x16_t a" + "int16x4_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83884,19 +287672,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_u16", + "name": "vreinterpret_u64_s32", "arguments": [ - "uint16x8_t a" + "int32x2_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83908,19 +287697,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_u32", + "name": "vreinterpret_u64_s64", "arguments": [ - "uint32x4_t a" + "int64x1_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.1D" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83932,19 
+287722,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_u64", + "name": "vreinterpret_u64_s8", "arguments": [ - "uint64x2_t a" + "int8x8_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1Q" + "register": "Vd.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83956,19 +287747,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_u8", + "name": "vreinterpret_u64_u16", "arguments": [ - "uint8x16_t a" + "uint16x4_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83980,16 +287772,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_f16", + "name": "vreinterpret_u64_u32", "arguments": [ - "float16x8_t a" + "uint32x2_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2S" } }, "Architectures": [ @@ -84005,16 +287797,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_f32", + "name": "vreinterpret_u64_u8", "arguments": [ - "float32x4_t a" + "uint8x8_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.8B" } }, "Architectures": [ @@ -84030,19 +287822,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_f64", + "name": "vreinterpret_u8_f16", "arguments": [ - "float64x2_t a" + "float16x4_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -84053,19 +287847,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_p128", + "name": "vreinterpret_u8_f32", "arguments": [ - "poly128_t a" + "float32x2_t a" ], "return_type": { - "value": 
"poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1Q" + "register": "Vd.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84077,20 +287872,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_p64", + "name": "vreinterpret_u8_f64", "arguments": [ - "poly64x2_t a" + "float64x1_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.1D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -84101,16 +287895,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_p8", + "name": "vreinterpret_u8_p16", "arguments": [ - "poly8x16_t a" + "poly16x4_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4H" } }, "Architectures": [ @@ -84126,20 +287920,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_s16", + "name": "vreinterpret_u8_p64", "arguments": [ - "int16x8_t a" + "poly64x1_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -84151,16 +287944,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_s32", + "name": "vreinterpret_u8_p8", "arguments": [ - "int32x4_t a" + "poly8x8_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.8B" } }, "Architectures": [ @@ -84176,16 +287969,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_s64", + "name": "vreinterpret_u8_s16", "arguments": [ - "int64x2_t a" + "int16x4_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.4H" } }, "Architectures": [ @@ -84201,16 +287994,16 @@ }, { "SIMD_ISA": "Neon", - "name": 
"vreinterpretq_p16_s8", + "name": "vreinterpret_u8_s32", "arguments": [ - "int8x16_t a" + "int32x2_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.2S" } }, "Architectures": [ @@ -84226,16 +288019,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_u16", + "name": "vreinterpret_u8_s64", "arguments": [ - "uint16x8_t a" + "int64x1_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.1D" } }, "Architectures": [ @@ -84251,16 +288044,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_u32", + "name": "vreinterpret_u8_s8", "arguments": [ - "uint32x4_t a" + "int8x8_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.8B" } }, "Architectures": [ @@ -84276,16 +288069,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_u64", + "name": "vreinterpret_u8_u16", "arguments": [ - "uint64x2_t a" + "uint16x4_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.4H" } }, "Architectures": [ @@ -84301,16 +288094,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_u8", + "name": "vreinterpret_u8_u32", "arguments": [ - "uint8x16_t a" + "uint32x2_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.2S" } }, "Architectures": [ @@ -84326,19 +288119,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_f16", + "name": "vreinterpret_u8_u64", "arguments": [ - "float16x8_t a" + "uint64x1_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.1D" } }, "Architectures": [ + "v7", "A32", "A64" 
], @@ -84350,12 +288144,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_f32", + "name": "vreinterpretq_f16_f32", "arguments": [ "float32x4_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { @@ -84363,6 +288157,7 @@ } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84374,12 +288169,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_f64", + "name": "vreinterpretq_f16_f64", "arguments": [ "float64x2_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { @@ -84397,16 +288192,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_p16", + "name": "vreinterpretq_f16_p128", "arguments": [ - "poly16x8_t a" + "poly128_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.1Q" } }, "Architectures": [ @@ -84421,19 +288216,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_p8", + "name": "vreinterpretq_f16_p16", "arguments": [ - "poly8x16_t a" + "poly16x8_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84445,16 +288241,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_s16", + "name": "vreinterpretq_f16_p64", "arguments": [ - "int16x8_t a" + "poly64x2_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2D" } }, "Architectures": [ @@ -84469,19 +288265,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_s32", + "name": "vreinterpretq_f16_p8", "arguments": [ - "int32x4_t a" + "poly8x16_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.16B" } }, "Architectures": [ + 
"v7", "A32", "A64" ], @@ -84493,19 +288290,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_s64", + "name": "vreinterpretq_f16_s16", "arguments": [ - "int64x2_t a" + "int16x8_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84517,19 +288315,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_s8", + "name": "vreinterpretq_f16_s32", "arguments": [ - "int8x16_t a" + "int32x4_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84541,19 +288340,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_u16", + "name": "vreinterpretq_f16_s64", "arguments": [ - "uint16x8_t a" + "int64x2_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2D" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84565,19 +288365,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_u32", + "name": "vreinterpretq_f16_s8", "arguments": [ - "uint32x4_t a" + "int8x16_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84589,19 +288390,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_u64", + "name": "vreinterpretq_f16_u16", "arguments": [ - "uint64x2_t a" + "uint16x8_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84613,19 +288415,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_u8", + "name": "vreinterpretq_f16_u32", "arguments": [ - 
"uint8x16_t a" + "uint32x4_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84637,16 +288440,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_f16", + "name": "vreinterpretq_f16_u64", "arguments": [ - "float16x8_t a" + "uint64x2_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2D" } }, "Architectures": [ @@ -84662,16 +288465,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_f32", + "name": "vreinterpretq_f16_u8", "arguments": [ - "float32x4_t a" + "uint8x16_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.16B" } }, "Architectures": [ @@ -84687,19 +288490,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_f64", + "name": "vreinterpretq_f32_f16", "arguments": [ - "float64x2_t a" + "float16x8_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -84710,20 +288515,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_p128", + "name": "vreinterpretq_f32_f64", "arguments": [ - "poly128_t a" + "float64x2_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1Q" + "register": "Vd.2D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -84734,12 +288538,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_p16", + "name": "vreinterpretq_f32_p16", "arguments": [ "poly16x8_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { @@ -84759,19 +288563,20 @@ }, { "SIMD_ISA": 
"Neon", - "name": "vreinterpretq_p8_p64", + "name": "vreinterpretq_f32_p8", "arguments": [ - "poly64x2_t a" + "poly8x16_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84783,12 +288588,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_s16", + "name": "vreinterpretq_f32_s16", "arguments": [ "int16x8_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { @@ -84808,12 +288613,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_s32", + "name": "vreinterpretq_f32_s32", "arguments": [ "int32x4_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { @@ -84833,12 +288638,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_s64", + "name": "vreinterpretq_f32_s64", "arguments": [ "int64x2_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { @@ -84858,12 +288663,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_s8", + "name": "vreinterpretq_f32_s8", "arguments": [ "int8x16_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { @@ -84883,12 +288688,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_u16", + "name": "vreinterpretq_f32_u16", "arguments": [ "uint16x8_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { @@ -84908,12 +288713,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_u32", + "name": "vreinterpretq_f32_u32", "arguments": [ "uint32x4_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { @@ -84933,12 +288738,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_u64", + "name": "vreinterpretq_f32_u64", "arguments": [ 
"uint64x2_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { @@ -84958,12 +288763,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_u8", + "name": "vreinterpretq_f32_u8", "arguments": [ "uint8x16_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { @@ -84983,12 +288788,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_f16", + "name": "vreinterpretq_f64_f16", "arguments": [ "float16x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -84996,8 +288801,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85008,12 +288811,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_f32", + "name": "vreinterpretq_f64_f32", "arguments": [ "float32x4_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -85021,8 +288824,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85033,16 +288834,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_f64", + "name": "vreinterpretq_f64_p128", "arguments": [ - "float64x2_t a" + "poly128_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.1Q" } }, "Architectures": [ @@ -85056,20 +288857,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_p128", + "name": "vreinterpretq_f64_p16", "arguments": [ - "poly128_t a" + "poly16x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1Q" + "register": "Vd.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -85080,21 +288880,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_p16", + "name": "vreinterpretq_f64_p64", "arguments": [ - "poly16x8_t a" + "poly64x2_t a" ], "return_type": { - 
"value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85105,20 +288903,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_p64", + "name": "vreinterpretq_f64_p8", "arguments": [ - "poly64x2_t a" + "poly8x16_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -85129,21 +288926,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_p8", + "name": "vreinterpretq_f64_s16", "arguments": [ - "poly8x16_t a" + "int16x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85154,12 +288949,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_s32", + "name": "vreinterpretq_f64_s32", "arguments": [ "int32x4_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -85167,8 +288962,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85179,12 +288972,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_s64", + "name": "vreinterpretq_f64_s64", "arguments": [ "int64x2_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -85192,8 +288985,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85204,12 +288995,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_s8", + "name": "vreinterpretq_f64_s8", "arguments": [ "int8x16_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -85217,8 +289008,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], 
"instructions": [ @@ -85229,12 +289018,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_u16", + "name": "vreinterpretq_f64_u16", "arguments": [ "uint16x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -85242,8 +289031,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85254,12 +289041,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_u32", + "name": "vreinterpretq_f64_u32", "arguments": [ "uint32x4_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -85267,8 +289054,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85279,12 +289064,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_u64", + "name": "vreinterpretq_f64_u64", "arguments": [ "uint64x2_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -85304,12 +289089,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_u8", + "name": "vreinterpretq_f64_u8", "arguments": [ "uint8x16_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -85317,8 +289102,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85329,12 +289112,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_f16", + "name": "vreinterpretq_p128_f16", "arguments": [ "float16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { @@ -85342,7 +289125,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85354,12 +289136,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_f32", + "name": "vreinterpretq_p128_f32", "arguments": [ "float32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { @@ -85367,7 +289149,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85379,35 
+289160,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_f64", + "name": "vreinterpretq_p128_f64", "arguments": [ "float64x2_t a" ], "return_type": { - "value": "int32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_p128", - "arguments": [ - "poly128_t a" - ], - "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { @@ -85415,7 +289173,6 @@ } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -85426,12 +289183,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_p16", + "name": "vreinterpretq_p128_p16", "arguments": [ "poly16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { @@ -85439,7 +289196,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85451,16 +289207,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_p64", + "name": "vreinterpretq_p128_p8", "arguments": [ - "poly64x2_t a" + "poly8x16_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.16B" } }, "Architectures": [ @@ -85475,20 +289231,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_p8", + "name": "vreinterpretq_p128_s16", "arguments": [ - "poly8x16_t a" + "int16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8H" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85500,20 +289255,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_s16", + "name": "vreinterpretq_p128_s32", "arguments": [ - "int16x8_t a" + "int32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4S" } }, 
"Architectures": [ - "v7", "A32", "A64" ], @@ -85525,20 +289279,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_s64", + "name": "vreinterpretq_p128_s64", "arguments": [ "int64x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.1Q" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85550,12 +289303,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_s8", + "name": "vreinterpretq_p128_s8", "arguments": [ "int8x16_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { @@ -85563,7 +289316,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85575,12 +289327,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_u16", + "name": "vreinterpretq_p128_u16", "arguments": [ "uint16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { @@ -85588,7 +289340,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85600,12 +289351,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_u32", + "name": "vreinterpretq_p128_u32", "arguments": [ "uint32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { @@ -85613,7 +289364,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85625,20 +289375,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_u64", + "name": "vreinterpretq_p128_u64", "arguments": [ "uint64x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.1Q" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85650,12 +289399,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_u8", + "name": "vreinterpretq_p128_u8", "arguments": [ "uint8x16_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { @@ -85663,7 +289412,6 
@@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85675,12 +289423,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_f16", + "name": "vreinterpretq_p16_f16", "arguments": [ "float16x8_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -85700,12 +289448,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_f32", + "name": "vreinterpretq_p16_f32", "arguments": [ "float32x4_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -85725,12 +289473,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_f64", + "name": "vreinterpretq_p16_f64", "arguments": [ "float64x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -85748,12 +289496,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_p128", + "name": "vreinterpretq_p16_p128", "arguments": [ "poly128_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -85772,20 +289520,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_p16", + "name": "vreinterpretq_p16_p64", "arguments": [ - "poly16x8_t a" + "poly64x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85797,19 +289544,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_p64", + "name": "vreinterpretq_p16_p8", "arguments": [ - "poly64x2_t a" + "poly8x16_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -85821,16 +289569,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_p8", + "name": "vreinterpretq_p16_s16", "arguments": [ - "poly8x16_t a" + "int16x8_t a" ], "return_type": 
{ - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8H" } }, "Architectures": [ @@ -85846,16 +289594,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_s16", + "name": "vreinterpretq_p16_s32", "arguments": [ - "int16x8_t a" + "int32x4_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4S" } }, "Architectures": [ @@ -85871,16 +289619,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_s32", + "name": "vreinterpretq_p16_s64", "arguments": [ - "int32x4_t a" + "int64x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.2D" } }, "Architectures": [ @@ -85896,12 +289644,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_s8", + "name": "vreinterpretq_p16_s8", "arguments": [ "int8x16_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -85921,12 +289669,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_u16", + "name": "vreinterpretq_p16_u16", "arguments": [ "uint16x8_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -85946,12 +289694,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_u32", + "name": "vreinterpretq_p16_u32", "arguments": [ "uint32x4_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -85971,12 +289719,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_u64", + "name": "vreinterpretq_p16_u64", "arguments": [ "uint64x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -85996,12 +289744,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_u8", + "name": "vreinterpretq_p16_u8", "arguments": [ 
"uint8x16_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -86021,12 +289769,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_f16", + "name": "vreinterpretq_p64_f16", "arguments": [ "float16x8_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { @@ -86034,7 +289782,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86046,12 +289793,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_f32", + "name": "vreinterpretq_p64_f32", "arguments": [ "float32x4_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { @@ -86059,7 +289806,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86071,12 +289817,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_f64", + "name": "vreinterpretq_p64_f64", "arguments": [ "float64x2_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { @@ -86094,36 +289840,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_p128", - "arguments": [ - "poly128_t a" - ], - "return_type": { - "value": "int8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.1Q" - } - }, - "Architectures": [ - "A32", - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_p16", + "name": "vreinterpretq_p64_p16", "arguments": [ "poly16x8_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { @@ -86131,7 +289853,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86143,16 +289864,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_p64", + "name": "vreinterpretq_p64_p8", "arguments": [ - "poly64x2_t a" + "poly8x16_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.16B" } }, 
"Architectures": [ @@ -86167,20 +289888,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_p8", + "name": "vreinterpretq_p64_s16", "arguments": [ - "poly8x16_t a" + "int16x8_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8H" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86192,20 +289912,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_s16", + "name": "vreinterpretq_p64_s32", "arguments": [ - "int16x8_t a" + "int32x4_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86217,20 +289936,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_s32", + "name": "vreinterpretq_p64_s64", "arguments": [ - "int32x4_t a" + "int64x2_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86242,20 +289960,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_s64", + "name": "vreinterpretq_p64_s8", "arguments": [ - "int64x2_t a" + "int8x16_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.16B" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86267,12 +289984,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_u16", + "name": "vreinterpretq_p64_u16", "arguments": [ "uint16x8_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { @@ -86280,7 +289997,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86292,12 +290008,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_u32", + "name": "vreinterpretq_p64_u32", "arguments": [ "uint32x4_t a" ], "return_type": { - "value": "int8x16_t" + 
"value": "poly64x2_t" }, "Arguments_Preparation": { "a": { @@ -86305,7 +290021,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86317,12 +290032,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_u64", + "name": "vreinterpretq_p64_u64", "arguments": [ "uint64x2_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { @@ -86330,7 +290045,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86342,12 +290056,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_u8", + "name": "vreinterpretq_p64_u8", "arguments": [ "uint8x16_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { @@ -86355,7 +290069,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86367,12 +290080,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_f16", + "name": "vreinterpretq_p8_f16", "arguments": [ "float16x8_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -86392,12 +290105,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_f32", + "name": "vreinterpretq_p8_f32", "arguments": [ "float32x4_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -86417,12 +290130,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_f64", + "name": "vreinterpretq_p8_f64", "arguments": [ "float64x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -86440,12 +290153,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_p128", + "name": "vreinterpretq_p8_p128", "arguments": [ "poly128_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -86464,12 +290177,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_p16", + "name": "vreinterpretq_p8_p16", "arguments": [ "poly16x8_t a" ], "return_type": { - "value": 
"uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -86489,12 +290202,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_p64", + "name": "vreinterpretq_p8_p64", "arguments": [ "poly64x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -86513,16 +290226,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_p8", + "name": "vreinterpretq_p8_s16", "arguments": [ - "poly8x16_t a" + "int16x8_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8H" } }, "Architectures": [ @@ -86538,16 +290251,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_s16", + "name": "vreinterpretq_p8_s32", "arguments": [ - "int16x8_t a" + "int32x4_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4S" } }, "Architectures": [ @@ -86563,16 +290276,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_s32", + "name": "vreinterpretq_p8_s64", "arguments": [ - "int32x4_t a" + "int64x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.2D" } }, "Architectures": [ @@ -86588,16 +290301,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_s64", + "name": "vreinterpretq_p8_s8", "arguments": [ - "int64x2_t a" + "int8x16_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.16B" } }, "Architectures": [ @@ -86613,16 +290326,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_s8", + "name": "vreinterpretq_p8_u16", "arguments": [ - "int8x16_t a" + "uint16x8_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + 
"register": "Vd.8H" } }, "Architectures": [ @@ -86638,12 +290351,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_u32", + "name": "vreinterpretq_p8_u32", "arguments": [ "uint32x4_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -86663,12 +290376,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_u64", + "name": "vreinterpretq_p8_u64", "arguments": [ "uint64x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -86688,12 +290401,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_u8", + "name": "vreinterpretq_p8_u8", "arguments": [ "uint8x16_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -86713,12 +290426,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_f16", + "name": "vreinterpretq_s16_f16", "arguments": [ "float16x8_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -86738,12 +290451,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_f32", + "name": "vreinterpretq_s16_f32", "arguments": [ "float32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -86763,12 +290476,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_f64", + "name": "vreinterpretq_s16_f64", "arguments": [ "float64x2_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -86786,12 +290499,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_p128", + "name": "vreinterpretq_s16_p128", "arguments": [ "poly128_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -86810,12 +290523,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_p16", + "name": "vreinterpretq_s16_p16", "arguments": [ "poly16x8_t a" ], 
"return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -86835,12 +290548,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_p64", + "name": "vreinterpretq_s16_p64", "arguments": [ "poly64x2_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -86859,12 +290572,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_p8", + "name": "vreinterpretq_s16_p8", "arguments": [ "poly8x16_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -86884,16 +290597,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_s16", + "name": "vreinterpretq_s16_s32", "arguments": [ - "int16x8_t a" + "int32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4S" } }, "Architectures": [ @@ -86909,16 +290622,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_s32", + "name": "vreinterpretq_s16_s64", "arguments": [ - "int32x4_t a" + "int64x2_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.2D" } }, "Architectures": [ @@ -86934,16 +290647,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_s64", + "name": "vreinterpretq_s16_s8", "arguments": [ - "int64x2_t a" + "int8x16_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.16B" } }, "Architectures": [ @@ -86959,16 +290672,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_s8", + "name": "vreinterpretq_s16_u16", "arguments": [ - "int8x16_t a" + "uint16x8_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8H" } }, "Architectures": [ @@ -86984,16 +290697,16 
@@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_u16", + "name": "vreinterpretq_s16_u32", "arguments": [ - "uint16x8_t a" + "uint32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4S" } }, "Architectures": [ @@ -87009,12 +290722,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_u64", + "name": "vreinterpretq_s16_u64", "arguments": [ "uint64x2_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -87034,12 +290747,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_u8", + "name": "vreinterpretq_s16_u8", "arguments": [ "uint8x16_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -87059,12 +290772,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_f16", + "name": "vreinterpretq_s32_f16", "arguments": [ "float16x8_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87084,12 +290797,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_f32", + "name": "vreinterpretq_s32_f32", "arguments": [ "float32x4_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87109,12 +290822,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_f64", + "name": "vreinterpretq_s32_f64", "arguments": [ "float64x2_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87132,12 +290845,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_p128", + "name": "vreinterpretq_s32_p128", "arguments": [ "poly128_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87156,12 +290869,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_p16", + "name": "vreinterpretq_s32_p16", "arguments": [ 
"poly16x8_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87181,12 +290894,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_p64", + "name": "vreinterpretq_s32_p64", "arguments": [ "poly64x2_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87205,12 +290918,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_p8", + "name": "vreinterpretq_s32_p8", "arguments": [ "poly8x16_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87230,12 +290943,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_s16", + "name": "vreinterpretq_s32_s16", "arguments": [ "int16x8_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87255,37 +290968,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_s32", - "arguments": [ - "int32x4_t a" - ], - "return_type": { - "value": "uint64x2_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_s64", + "name": "vreinterpretq_s32_s64", "arguments": [ "int64x2_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87305,12 +290993,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_s8", + "name": "vreinterpretq_s32_s8", "arguments": [ "int8x16_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87330,12 +291018,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_u16", + "name": "vreinterpretq_s32_u16", "arguments": [ "uint16x8_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87355,12 +291043,12 @@ }, { 
"SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_u32", + "name": "vreinterpretq_s32_u32", "arguments": [ "uint32x4_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87380,41 +291068,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_u8", - "arguments": [ - "uint8x16_t a" - ], - "return_type": { - "value": "uint64x2_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_f16", + "name": "vreinterpretq_s32_u64", "arguments": [ - "float16x8_t a" + "uint64x2_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2D" } }, "Architectures": [ @@ -87430,16 +291093,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_f32", + "name": "vreinterpretq_s32_u8", "arguments": [ - "float32x4_t a" + "uint8x16_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.16B" } }, "Architectures": [ @@ -87455,59 +291118,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_f64", - "arguments": [ - "float64x2_t a" - ], - "return_type": { - "value": "uint8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_p128", - "arguments": [ - "poly128_t a" - ], - "return_type": { - "value": "uint8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.1Q" - } - }, - "Architectures": [ - "A32", - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_p16", + "name": "vreinterpretq_s64_f16", "arguments": [ - "poly16x8_t a" + "float16x8_t a" ], 
"return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { @@ -87527,40 +291143,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_p64", - "arguments": [ - "poly64x2_t a" - ], - "return_type": { - "value": "uint8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - } - }, - "Architectures": [ - "A32", - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_p8", + "name": "vreinterpretq_s64_f32", "arguments": [ - "poly8x16_t a" + "float32x4_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4S" } }, "Architectures": [ @@ -87576,21 +291168,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_s16", + "name": "vreinterpretq_s64_f64", "arguments": [ - "int16x8_t a" + "float64x2_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2D" } }, - "Architectures": [ - "v7", - "A32", + "Architectures": [ "A64" ], "instructions": [ @@ -87601,20 +291191,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_s32", + "name": "vreinterpretq_s64_p128", "arguments": [ - "int32x4_t a" + "poly128_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.1Q" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -87626,16 +291215,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_s64", + "name": "vreinterpretq_s64_p16", "arguments": [ - "int64x2_t a" + "poly16x8_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8H" } }, "Architectures": [ @@ -87651,20 +291240,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_s8", + "name": "vreinterpretq_s64_p64", "arguments": [ - 
"int8x16_t a" + "poly64x2_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -87676,16 +291264,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_u16", + "name": "vreinterpretq_s64_p8", "arguments": [ - "uint16x8_t a" + "poly8x16_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.16B" } }, "Architectures": [ @@ -87701,16 +291289,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_u32", + "name": "vreinterpretq_s64_s16", "arguments": [ - "uint32x4_t a" + "int16x8_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.8H" } }, "Architectures": [ @@ -87726,16 +291314,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_u64", + "name": "vreinterpretq_s64_s32", "arguments": [ - "uint64x2_t a" + "int32x4_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.4S" } }, "Architectures": [ @@ -87751,16 +291339,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vrev16_p8", + "name": "vreinterpretq_s64_s8", "arguments": [ - "poly8x8_t vec" + "int8x16_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.16B" } }, "Architectures": [ @@ -87770,22 +291358,22 @@ ], "instructions": [ [ - "REV16" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev16_s8", + "name": "vreinterpretq_s64_u16", "arguments": [ - "int8x8_t vec" + "uint16x8_t a" ], "return_type": { - "value": "int8x8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -87795,22 
+291383,22 @@ ], "instructions": [ [ - "REV16" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev16_u8", + "name": "vreinterpretq_s64_u32", "arguments": [ - "uint8x8_t vec" + "uint32x4_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.4S" } }, "Architectures": [ @@ -87820,22 +291408,22 @@ ], "instructions": [ [ - "REV16" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev16q_p8", + "name": "vreinterpretq_s64_u64", "arguments": [ - "poly8x16_t vec" + "uint64x2_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.2D" } }, "Architectures": [ @@ -87845,22 +291433,22 @@ ], "instructions": [ [ - "REV16" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev16q_s8", + "name": "vreinterpretq_s64_u8", "arguments": [ - "int8x16_t vec" + "uint8x16_t a" ], "return_type": { - "value": "int8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.16B" } }, "Architectures": [ @@ -87870,22 +291458,22 @@ ], "instructions": [ [ - "REV16" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev16q_u8", + "name": "vreinterpretq_s8_f16", "arguments": [ - "uint8x16_t vec" + "float16x8_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -87895,22 +291483,22 @@ ], "instructions": [ [ - "REV16" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32_p16", + "name": "vreinterpretq_s8_f32", "arguments": [ - "poly16x4_t vec" + "float32x4_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4H" + "a": { + "register": "Vd.4S" } }, "Architectures": [ @@ -87920,72 +291508,69 @@ ], "instructions": [ [ - 
"REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32_p8", + "name": "vreinterpretq_s8_f64", "arguments": [ - "poly8x8_t vec" + "float64x2_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32_s16", + "name": "vreinterpretq_s8_p128", "arguments": [ - "int16x4_t vec" + "poly128_t a" ], "return_type": { - "value": "int16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4H" + "a": { + "register": "Vd.1Q" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32_s8", + "name": "vreinterpretq_s8_p16", "arguments": [ - "int8x8_t vec" + "poly16x8_t a" ], "return_type": { - "value": "int8x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -87995,47 +291580,46 @@ ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32_u16", + "name": "vreinterpretq_s8_p64", "arguments": [ - "uint16x4_t vec" + "poly64x2_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4H" + "a": { + "register": "Vd.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32_u8", + "name": "vreinterpretq_s8_p8", "arguments": [ - "uint8x8_t vec" + "poly8x16_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.16B" } }, "Architectures": [ @@ -88045,22 +291629,22 @@ ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32q_p16", + 
"name": "vreinterpretq_s8_s16", "arguments": [ - "poly16x8_t vec" + "int16x8_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8H" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -88070,22 +291654,22 @@ ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32q_p8", + "name": "vreinterpretq_s8_s32", "arguments": [ - "poly8x16_t vec" + "int32x4_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.4S" } }, "Architectures": [ @@ -88095,22 +291679,22 @@ ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32q_s16", + "name": "vreinterpretq_s8_s64", "arguments": [ - "int16x8_t vec" + "int64x2_t a" ], "return_type": { - "value": "int16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8H" + "a": { + "register": "Vd.2D" } }, "Architectures": [ @@ -88120,22 +291704,22 @@ ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32q_s8", + "name": "vreinterpretq_s8_u16", "arguments": [ - "int8x16_t vec" + "uint16x8_t a" ], "return_type": { "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -88145,22 +291729,22 @@ ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32q_u16", + "name": "vreinterpretq_s8_u32", "arguments": [ - "uint16x8_t vec" + "uint32x4_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8H" + "a": { + "register": "Vd.4S" } }, "Architectures": [ @@ -88170,22 +291754,22 @@ ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32q_u8", + "name": "vreinterpretq_s8_u64", "arguments": [ - "uint8x16_t vec" + 
"uint64x2_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.2D" } }, "Architectures": [ @@ -88195,22 +291779,22 @@ ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_f16", + "name": "vreinterpretq_s8_u8", "arguments": [ - "float16x4_t vec" + "uint8x16_t a" ], "return_type": { - "value": "float16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4H" + "a": { + "register": "Vd.16B" } }, "Architectures": [ @@ -88220,22 +291804,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_f32", + "name": "vreinterpretq_u16_f16", "arguments": [ - "float32x2_t vec" + "float16x8_t a" ], "return_type": { - "value": "float32x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.2S" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -88245,22 +291829,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_p16", + "name": "vreinterpretq_u16_f32", "arguments": [ - "poly16x4_t vec" + "float32x4_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4H" + "a": { + "register": "Vd.4S" } }, "Architectures": [ @@ -88270,72 +291854,69 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_p8", + "name": "vreinterpretq_u16_f64", "arguments": [ - "poly8x8_t vec" + "float64x2_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_s16", + "name": "vreinterpretq_u16_p128", "arguments": [ - "int16x4_t vec" + "poly128_t a" ], "return_type": { - 
"value": "int16x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4H" + "a": { + "register": "Vd.1Q" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_s32", + "name": "vreinterpretq_u16_p16", "arguments": [ - "int32x2_t vec" + "poly16x8_t a" ], "return_type": { - "value": "int32x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.2S" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -88345,47 +291926,46 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_s8", + "name": "vreinterpretq_u16_p64", "arguments": [ - "int8x8_t vec" + "poly64x2_t a" ], "return_type": { - "value": "int8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_u16", + "name": "vreinterpretq_u16_p8", "arguments": [ - "uint16x4_t vec" + "poly8x16_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4H" + "a": { + "register": "Vd.16B" } }, "Architectures": [ @@ -88395,22 +291975,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_u32", + "name": "vreinterpretq_u16_s16", "arguments": [ - "uint32x2_t vec" + "int16x8_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.2S" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -88420,22 +292000,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_u8", + "name": "vreinterpretq_u16_s32", "arguments": [ - "uint8x8_t vec" + "int32x4_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "uint16x8_t" }, 
"Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.4S" } }, "Architectures": [ @@ -88445,22 +292025,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_f16", + "name": "vreinterpretq_u16_s64", "arguments": [ - "float16x8_t vec" + "int64x2_t a" ], "return_type": { - "value": "float16x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8H" + "a": { + "register": "Vd.2D" } }, "Architectures": [ @@ -88470,22 +292050,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_f32", + "name": "vreinterpretq_u16_s8", "arguments": [ - "float32x4_t vec" + "int8x16_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4S" + "a": { + "register": "Vd.16B" } }, "Architectures": [ @@ -88495,22 +292075,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_p16", + "name": "vreinterpretq_u16_u32", "arguments": [ - "poly16x8_t vec" + "uint32x4_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8H" + "a": { + "register": "Vd.4S" } }, "Architectures": [ @@ -88520,22 +292100,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_p8", + "name": "vreinterpretq_u16_u64", "arguments": [ - "poly8x16_t vec" + "uint64x2_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.2D" } }, "Architectures": [ @@ -88545,22 +292125,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_s16", + "name": "vreinterpretq_u16_u8", "arguments": [ - "int16x8_t vec" + "uint8x16_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - 
"vec": { - "register": "Vn.8H" + "a": { + "register": "Vd.16B" } }, "Architectures": [ @@ -88570,22 +292150,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_s32", + "name": "vreinterpretq_u32_f16", "arguments": [ - "int32x4_t vec" + "float16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4S" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -88595,22 +292175,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_s8", + "name": "vreinterpretq_u32_f32", "arguments": [ - "int8x16_t vec" + "float32x4_t a" ], "return_type": { - "value": "int8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.4S" } }, "Architectures": [ @@ -88620,72 +292200,69 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_u16", + "name": "vreinterpretq_u32_f64", "arguments": [ - "uint16x8_t vec" + "float64x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8H" + "a": { + "register": "Vd.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_u32", + "name": "vreinterpretq_u32_p128", "arguments": [ - "uint32x4_t vec" + "poly128_t a" ], "return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4S" + "a": { + "register": "Vd.1Q" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_u8", + "name": "vreinterpretq_u32_p16", "arguments": [ - "uint8x16_t vec" + "poly16x8_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.8H" } 
}, "Architectures": [ @@ -88695,55 +292272,46 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhadd_s16", + "name": "vreinterpretq_u32_p64", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "poly64x2_t a" ], "return_type": { - "value": "int16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "register": "Vd.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SRHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhadd_s32", + "name": "vreinterpretq_u32_p8", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "poly8x16_t a" ], "return_type": { - "value": "int32x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" + "register": "Vd.16B" } }, "Architectures": [ @@ -88753,26 +292321,22 @@ ], "instructions": [ [ - "SRHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhadd_s8", + "name": "vreinterpretq_u32_s16", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "int16x8_t a" ], "return_type": { - "value": "int8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "register": "Vd.8H" } }, "Architectures": [ @@ -88782,26 +292346,22 @@ ], "instructions": [ [ - "SRHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhadd_u16", + "name": "vreinterpretq_u32_s32", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "int32x4_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "register": "Vd.4S" } }, "Architectures": [ @@ -88811,26 +292371,22 @@ ], "instructions": [ [ - "URHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhadd_u32", + "name": "vreinterpretq_u32_s64", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "int64x2_t a" ], "return_type": { - "value": "uint32x2_t" 
+ "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" + "register": "Vd.2D" } }, "Architectures": [ @@ -88840,26 +292396,22 @@ ], "instructions": [ [ - "URHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhadd_u8", + "name": "vreinterpretq_u32_s8", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "int8x16_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "register": "Vd.16B" } }, "Architectures": [ @@ -88869,26 +292421,22 @@ ], "instructions": [ [ - "URHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhaddq_s16", + "name": "vreinterpretq_u32_u16", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "uint16x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vd.8H" } }, "Architectures": [ @@ -88898,26 +292446,22 @@ ], "instructions": [ [ - "SRHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhaddq_s32", + "name": "vreinterpretq_u32_u64", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "uint64x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vd.2D" } }, "Architectures": [ @@ -88927,26 +292471,22 @@ ], "instructions": [ [ - "SRHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhaddq_s8", + "name": "vreinterpretq_u32_u8", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "uint8x16_t a" ], "return_type": { - "value": "int8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "register": "Vd.16B" } }, "Architectures": [ @@ -88956,26 +292496,22 @@ ], "instructions": [ [ - "SRHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vrhaddq_u16", + "name": "vreinterpretq_u64_f16", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "float16x8_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vd.8H" } }, "Architectures": [ @@ -88985,26 +292521,22 @@ ], "instructions": [ [ - "URHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhaddq_u32", + "name": "vreinterpretq_u64_f32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "float32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vd.4S" } }, "Architectures": [ @@ -89014,350 +292546,368 @@ ], "instructions": [ [ - "URHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhaddq_u8", + "name": "vreinterpretq_u64_f64", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "float64x2_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "register": "Vd.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd32x_f32", + "name": "vreinterpretq_u64_p128", "arguments": [ - "float32x2_t a" + "poly128_t a" ], "return_type": { - "value": "float32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.1Q" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRINT32X" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd32x_f64", + "name": "vreinterpretq_u64_p16", "arguments": [ - "float64x1_t a" + "poly16x8_t a" ], "return_type": { - "value": "float64x1_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - 
"FRINT32X" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd32xq_f32", + "name": "vreinterpretq_u64_p64", "arguments": [ - "float32x4_t a" + "poly64x2_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.2D" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRINT32X" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd32xq_f64", + "name": "vreinterpretq_u64_p8", "arguments": [ - "float64x2_t a" + "poly8x16_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT32X" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd32z_f32", + "name": "vreinterpretq_u64_s16", "arguments": [ - "float32x2_t a" + "int16x8_t a" ], "return_type": { - "value": "float32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT32Z" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd32z_f64", + "name": "vreinterpretq_u64_s32", "arguments": [ - "float64x1_t a" + "int32x4_t a" ], "return_type": { - "value": "float64x1_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT32Z" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd32zq_f32", + "name": "vreinterpretq_u64_s64", "arguments": [ - "float32x4_t a" + "int64x2_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT32Z" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd32zq_f64", + "name": "vreinterpretq_u64_s8", "arguments": [ 
- "float64x2_t a" + "int8x16_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT32Z" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd64x_f32", + "name": "vreinterpretq_u64_u16", "arguments": [ - "float32x2_t a" + "uint16x8_t a" ], "return_type": { - "value": "float32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT64X" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd64x_f64", + "name": "vreinterpretq_u64_u32", "arguments": [ - "float64x1_t a" + "uint32x4_t a" ], "return_type": { - "value": "float64x1_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT64X" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd64xq_f32", + "name": "vreinterpretq_u64_u8", "arguments": [ - "float32x4_t a" + "uint8x16_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT64X" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd64xq_f64", + "name": "vreinterpretq_u8_f16", "arguments": [ - "float64x2_t a" + "float16x8_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT64X" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd64z_f32", + "name": "vreinterpretq_u8_f32", "arguments": [ - "float32x2_t a" + "float32x4_t a" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x16_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT64Z" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd64z_f64", + "name": "vreinterpretq_u8_f64", "arguments": [ - "float64x1_t a" + "float64x2_t a" ], "return_type": { - "value": "float64x1_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.2D" } }, "Architectures": [ @@ -89365,68 +292915,71 @@ ], "instructions": [ [ - "FRINT64Z" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd64zq_f32", + "name": "vreinterpretq_u8_p128", "arguments": [ - "float32x4_t a" + "poly128_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.1Q" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRINT64Z" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd64zq_f64", + "name": "vreinterpretq_u8_p16", "arguments": [ - "float64x2_t a" + "poly16x8_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT64Z" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd_f16", + "name": "vreinterpretq_u8_p64", "arguments": [ - "float16x4_t a" + "poly64x2_t a" ], "return_type": { - "value": "float16x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.2D" } }, "Architectures": [ @@ -89435,1181 +292988,1256 @@ ], "instructions": [ [ - "FRINTZ" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd_f32", + "name": "vreinterpretq_u8_p8", "arguments": [ - "float32x2_t a" + "poly8x16_t a" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.16B" } }, "Architectures": [ + "v7", "A32", 
"A64" ], "instructions": [ [ - "FRINTZ" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd_f64", + "name": "vreinterpretq_u8_s16", "arguments": [ - "float64x1_t a" + "int16x8_t a" ], "return_type": { - "value": "float64x1_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTZ" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnda_f16", + "name": "vreinterpretq_u8_s32", "arguments": [ - "float16x4_t a" + "int32x4_t a" ], "return_type": { - "value": "float16x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTA" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnda_f32", + "name": "vreinterpretq_u8_s64", "arguments": [ - "float32x2_t a" + "int64x2_t a" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.2D" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTA" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnda_f64", + "name": "vreinterpretq_u8_s8", "arguments": [ - "float64x1_t a" + "int8x16_t a" ], "return_type": { - "value": "float64x1_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTA" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndah_f16", + "name": "vreinterpretq_u8_u16", "arguments": [ - "float16_t a" + "uint16x8_t a" ], "return_type": { - "value": "float16_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTA" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndaq_f16", + "name": "vreinterpretq_u8_u32", "arguments": 
[ - "float16x8_t a" + "uint32x4_t a" ], "return_type": { - "value": "float16x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTA" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndaq_f32", + "name": "vreinterpretq_u8_u64", "arguments": [ - "float32x4_t a" + "uint64x2_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.2D" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTA" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndaq_f64", + "name": "vrev16_p8", "arguments": [ - "float64x2_t a" + "poly8x8_t vec" ], "return_type": { - "value": "float64x2_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "vec": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTA" + "REV16" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndh_f16", + "name": "vrev16_s8", "arguments": [ - "float16_t a" + "int8x8_t vec" ], "return_type": { - "value": "float16_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "vec": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTZ" + "REV16" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndi_f16", + "name": "vrev16_u8", "arguments": [ - "float16x4_t a" + "uint8x8_t vec" ], "return_type": { - "value": "float16x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "vec": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTI" + "REV16" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndi_f32", + "name": "vrev16q_p8", "arguments": [ - "float32x2_t a" + "poly8x16_t vec" ], "return_type": { - "value": "float32x2_t" + "value": "poly8x16_t" }, "Arguments_Preparation": 
{ - "a": { - "register": "Vn.2S" + "vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTI" + "REV16" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndi_f64", + "name": "vrev16q_s8", "arguments": [ - "float64x1_t a" + "int8x16_t vec" ], "return_type": { - "value": "float64x1_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTI" + "REV16" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndih_f16", + "name": "vrev16q_u8", "arguments": [ - "float16_t a" + "uint8x16_t vec" ], "return_type": { - "value": "float16_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTI" + "REV16" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndiq_f16", + "name": "vrev32_p16", "arguments": [ - "float16x8_t a" + "poly16x4_t vec" ], "return_type": { - "value": "float16x8_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "vec": { + "register": "Vn.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTI" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndiq_f32", + "name": "vrev32_p8", "arguments": [ - "float32x4_t a" + "poly8x8_t vec" ], "return_type": { - "value": "float32x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "vec": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTI" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndiq_f64", + "name": "vrev32_s16", "arguments": [ - "float64x2_t a" + "int16x4_t vec" ], "return_type": { - "value": "float64x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "vec": { + "register": "Vn.4H" } }, "Architectures": [ + "v7", + 
"A32", "A64" ], "instructions": [ [ - "FRINTI" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndm_f16", + "name": "vrev32_s8", "arguments": [ - "float16x4_t a" + "int8x8_t vec" ], "return_type": { - "value": "float16x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "vec": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTM" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndm_f32", + "name": "vrev32_u16", "arguments": [ - "float32x2_t a" + "uint16x4_t vec" ], "return_type": { - "value": "float32x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "vec": { + "register": "Vn.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTM" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndm_f64", + "name": "vrev32_u8", "arguments": [ - "float64x1_t a" + "uint8x8_t vec" ], "return_type": { - "value": "float64x1_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "vec": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTM" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndmh_f16", + "name": "vrev32q_p16", "arguments": [ - "float16_t a" + "poly16x8_t vec" ], "return_type": { - "value": "float16_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "vec": { + "register": "Vn.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTM" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndmq_f16", + "name": "vrev32q_p8", "arguments": [ - "float16x8_t a" + "poly8x16_t vec" ], "return_type": { - "value": "float16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTM" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndmq_f32", + 
"name": "vrev32q_s16", "arguments": [ - "float32x4_t a" + "int16x8_t vec" ], "return_type": { - "value": "float32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "vec": { + "register": "Vn.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTM" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndmq_f64", + "name": "vrev32q_s8", "arguments": [ - "float64x2_t a" + "int8x16_t vec" ], "return_type": { - "value": "float64x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTM" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndn_f16", + "name": "vrev32q_u16", "arguments": [ - "float16x4_t a" + "uint16x8_t vec" ], "return_type": { - "value": "float16x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "vec": { + "register": "Vn.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTN" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndn_f32", + "name": "vrev32q_u8", "arguments": [ - "float32x2_t a" + "uint8x16_t vec" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTN" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndn_f64", + "name": "vrev64_f16", "arguments": [ - "float64x1_t a" + "float16x4_t vec" ], "return_type": { - "value": "float64x1_t" + "value": "float16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "vec": { + "register": "Vn.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTN" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndnh_f16", + "name": "vrev64_f32", "arguments": [ - "float16_t a" + "float32x2_t vec" ], "return_type": { - 
"value": "float16_t" + "value": "float32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "vec": { + "register": "Vn.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTN" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndnq_f16", + "name": "vrev64_p16", "arguments": [ - "float16x8_t a" + "poly16x4_t vec" ], "return_type": { - "value": "float16x8_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "vec": { + "register": "Vn.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTN" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndnq_f32", + "name": "vrev64_p8", "arguments": [ - "float32x4_t a" + "poly8x8_t vec" ], "return_type": { - "value": "float32x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "vec": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTN" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndnq_f64", + "name": "vrev64_s16", "arguments": [ - "float64x2_t a" + "int16x4_t vec" ], "return_type": { - "value": "float64x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "vec": { + "register": "Vn.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTN" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndns_f32", + "name": "vrev64_s32", "arguments": [ - "float32_t a" + "int32x2_t vec" ], "return_type": { - "value": "float32_t" + "value": "int32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "vec": { + "register": "Vn.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTN" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndp_f16", + "name": "vrev64_s8", "arguments": [ - "float16x4_t a" + "int8x8_t vec" ], "return_type": { - "value": "float16x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "vec": { 
+ "register": "Vn.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTP" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndp_f32", + "name": "vrev64_u16", "arguments": [ - "float32x2_t a" + "uint16x4_t vec" ], "return_type": { - "value": "float32x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "vec": { + "register": "Vn.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTP" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndp_f64", + "name": "vrev64_u32", "arguments": [ - "float64x1_t a" + "uint32x2_t vec" ], "return_type": { - "value": "float64x1_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "vec": { + "register": "Vn.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTP" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndph_f16", + "name": "vrev64_u8", "arguments": [ - "float16_t a" + "uint8x8_t vec" ], "return_type": { - "value": "float16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "vec": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTP" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndpq_f16", + "name": "vrev64q_f16", "arguments": [ - "float16x8_t a" + "float16x8_t vec" ], "return_type": { "value": "float16x8_t" }, "Arguments_Preparation": { - "a": { + "vec": { "register": "Vn.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTP" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndpq_f32", + "name": "vrev64q_f32", "arguments": [ - "float32x4_t a" + "float32x4_t vec" ], "return_type": { "value": "float32x4_t" }, "Arguments_Preparation": { - "a": { + "vec": { "register": "Vn.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTP" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndpq_f64", + "name": "vrev64q_p16", "arguments": [ 
- "float64x2_t a" + "poly16x8_t vec" ], "return_type": { - "value": "float64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "vec": { + "register": "Vn.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTP" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndq_f16", + "name": "vrev64q_p8", "arguments": [ - "float16x8_t a" + "poly8x16_t vec" ], "return_type": { - "value": "float16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTZ" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndq_f32", + "name": "vrev64q_s16", "arguments": [ - "float32x4_t a" + "int16x8_t vec" ], "return_type": { - "value": "float32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "vec": { + "register": "Vn.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTZ" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndq_f64", + "name": "vrev64q_s32", "arguments": [ - "float64x2_t a" + "int32x4_t vec" ], "return_type": { - "value": "float64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "vec": { + "register": "Vn.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTZ" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndx_f16", + "name": "vrev64q_s8", "arguments": [ - "float16x4_t a" + "int8x16_t vec" ], "return_type": { - "value": "float16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTX" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndx_f32", + "name": "vrev64q_u16", "arguments": [ - "float32x2_t a" + "uint16x8_t vec" ], "return_type": { - "value": "float32x2_t" + "value": 
"uint16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "vec": { + "register": "Vn.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTX" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndx_f64", + "name": "vrev64q_u32", "arguments": [ - "float64x1_t a" + "uint32x4_t vec" ], "return_type": { - "value": "float64x1_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "vec": { + "register": "Vn.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTX" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndxh_f16", + "name": "vrev64q_u8", "arguments": [ - "float16_t a" + "uint8x16_t vec" ], "return_type": { - "value": "float16_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTX" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndxq_f16", + "name": "vrhadd_s16", "arguments": [ - "float16x8_t a" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "float16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTX" + "SRHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndxq_f32", + "name": "vrhadd_s32", "arguments": [ - "float32x4_t a" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "float32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTX" + "SRHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndxq_f64", + "name": "vrhadd_s8", "arguments": [ - "float64x2_t a" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "float64x2_t" + "value": "int8x8_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTX" + "SRHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshl_s16", + "name": "vrhadd_u16", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "int16x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { @@ -90626,19 +294254,19 @@ ], "instructions": [ [ - "SRSHL" + "URHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshl_s32", + "name": "vrhadd_u32", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "int32x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { @@ -90655,26 +294283,26 @@ ], "instructions": [ [ - "SRSHL" + "URHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshl_s64", + "name": "vrhadd_u8", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "int64x1_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.8B" }, "b": { - "register": "Dm" + "register": "Vm.8B" } }, "Architectures": [ @@ -90684,26 +294312,26 @@ ], "instructions": [ [ - "SRSHL" + "URHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshl_s8", + "name": "vrhaddq_s16", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int8x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, "b": { - "register": "Vm.8B" + "register": "Vm.8H" } }, "Architectures": [ @@ -90713,26 +294341,26 @@ ], "instructions": [ [ - "SRSHL" + "SRHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshl_u16", + "name": "vrhaddq_s32", "arguments": [ - "uint16x4_t a", - "int16x4_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "int32x4_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" }, "b": { - "register": "Vm.4H" + "register": "Vm.4S" } }, "Architectures": [ @@ -90742,26 +294370,26 @@ ], "instructions": [ [ - "URSHL" + "SRHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshl_u32", + "name": "vrhaddq_s8", "arguments": [ - "uint32x2_t a", - "int32x2_t b" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.16B" }, "b": { - "register": "Vm.2S" + "register": "Vm.16B" } }, "Architectures": [ @@ -90771,26 +294399,26 @@ ], "instructions": [ [ - "URSHL" + "SRHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshl_u64", + "name": "vrhaddq_u16", "arguments": [ - "uint64x1_t a", - "int64x1_t b" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "uint64x1_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.8H" }, "b": { - "register": "Dm" + "register": "Vm.8H" } }, "Architectures": [ @@ -90800,26 +294428,26 @@ ], "instructions": [ [ - "URSHL" + "URHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshl_u8", + "name": "vrhaddq_u32", "arguments": [ - "uint8x8_t a", - "int8x8_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" }, "b": { - "register": "Vm.8B" + "register": "Vm.4S" } }, "Architectures": [ @@ -90829,53 +294457,51 @@ ], "instructions": [ [ - "URSHL" + "URHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshld_s64", + "name": "vrhaddq_u8", "arguments": [ - "int64_t a", - "int64_t b" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "int64_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.16B" }, "b": { - "register": "Dm" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], 
"instructions": [ [ - "SRSHL" + "URHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshld_u64", + "name": "vrnd32x_f32", "arguments": [ - "uint64_t a", - "int64_t b" + "float32x2_t a" ], "return_type": { - "value": "uint64_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "b": { - "register": "Dm" + "register": "Vn" } }, "Architectures": [ @@ -90883,527 +294509,415 @@ ], "instructions": [ [ - "URSHL" + "FRINT32X" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshlq_s16", + "name": "vrnd32x_f64", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "float64x1_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHL" + "FRINT32X" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshlq_s32", + "name": "vrnd32xq_f32", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "float32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHL" + "FRINT32X" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshlq_s64", + "name": "vrnd32xq_f64", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "float64x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHL" + "FRINT32X" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshlq_s8", + "name": "vrnd32z_f32", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "float32x2_t a" ], "return_type": { - "value": "int8x16_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "b": { 
- "register": "Vm.16B" + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHL" + "FRINT32Z" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshlq_u16", + "name": "vrnd32z_f64", "arguments": [ - "uint16x8_t a", - "int16x8_t b" + "float64x1_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSHL" + "FRINT32Z" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshlq_u32", + "name": "vrnd32zq_f32", "arguments": [ - "uint32x4_t a", - "int32x4_t b" + "float32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSHL" + "FRINT32Z" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshlq_u64", + "name": "vrnd32zq_f64", "arguments": [ - "uint64x2_t a", - "int64x2_t b" + "float64x2_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSHL" + "FRINT32Z" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshlq_u8", + "name": "vrnd64x_f32", "arguments": [ - "uint8x16_t a", - "int8x16_t b" + "float32x2_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSHL" + "FRINT64X" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshr_n_s16", + "name": "vrnd64x_f64", "arguments": [ - "int16x4_t a", - "const int n" + "float64x1_t a" ], "return_type": { - "value": 
"int16x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINT64X" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshr_n_s32", + "name": "vrnd64xq_f32", "arguments": [ - "int32x2_t a", - "const int n" + "float32x4_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINT64X" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshr_n_s64", + "name": "vrnd64xq_f64", "arguments": [ - "int64x1_t a", - "const int n" + "float64x2_t a" ], "return_type": { - "value": "int64x1_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINT64X" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshr_n_s8", + "name": "vrnd64z_f32", "arguments": [ - "int8x8_t a", - "const int n" + "float32x2_t a" ], "return_type": { - "value": "int8x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINT64Z" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshr_n_u16", + "name": "vrnd64z_f64", "arguments": [ - "uint16x4_t a", - "const int n" + "float64x1_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSHR" + "FRINT64Z" ] ] }, { "SIMD_ISA": "Neon", 
- "name": "vrshr_n_u32", + "name": "vrnd64zq_f32", "arguments": [ - "uint32x2_t a", - "const int n" + "float32x4_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSHR" + "FRINT64Z" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshr_n_u64", + "name": "vrnd64zq_f64", "arguments": [ - "uint64x1_t a", - "const int n" + "float64x2_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSHR" + "FRINT64Z" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshr_n_u8", + "name": "vrnd_f16", "arguments": [ - "uint8x8_t a", - "const int n" + "float16x4_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Vn.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSHR" + "FRINTZ" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrd_n_s64", + "name": "vrnd_f32", "arguments": [ - "int64_t a", - "const int n" + "float32x2_t a" ], "return_type": { - "value": "int64_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Vn.2S" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINTZ" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrd_n_u64", + "name": "vrnd_f64", "arguments": [ - "uint64_t a", - "const int n" + "float64x1_t a" ], "return_type": { - "value": "uint64_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { "register": "Dn" - }, - "n": { - "minimum": 1, - "maximum": 64 } }, 
"Architectures": [ @@ -91411,95 +294925,70 @@ ], "instructions": [ [ - "URSHR" + "FRINTZ" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_high_n_s16", + "name": "vrnda_f16", "arguments": [ - "int8x8_t r", - "int16x8_t a", - "const int n" + "float16x4_t a" ], "return_type": { - "value": "int8x16_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 8 - }, - "r": { - "register": "Vd.8B" + "register": "Vn.4H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "RSHRN2" + "FRINTA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_high_n_s32", + "name": "vrnda_f32", "arguments": [ - "int16x4_t r", - "int32x4_t a", - "const int n" + "float32x2_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 16 - }, - "r": { - "register": "Vd.4H" + "register": "Vn.2S" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "RSHRN2" + "FRINTA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_high_n_s64", + "name": "vrnda_f64", "arguments": [ - "int32x2_t r", - "int64x2_t a", - "const int n" + "float64x1_t a" ], "return_type": { - "value": "int32x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 32 - }, - "r": { - "register": "Vd.2S" + "register": "Dn" } }, "Architectures": [ @@ -91507,538 +294996,424 @@ ], "instructions": [ [ - "RSHRN2" + "FRINTA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_high_n_u16", + "name": "vrndah_f16", "arguments": [ - "uint8x8_t r", - "uint16x8_t a", - "const int n" + "float16_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 8 - }, - "r": { - "register": "Vd.8B" + "register": "Hn" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - 
"RSHRN2" + "FRINTA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_high_n_u32", + "name": "vrndaq_f16", "arguments": [ - "uint16x4_t r", - "uint32x4_t a", - "const int n" + "float16x8_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 16 - }, - "r": { - "register": "Vd.4H" + "register": "Vn.8H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "RSHRN2" + "FRINTA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_high_n_u64", + "name": "vrndaq_f32", "arguments": [ - "uint32x2_t r", - "uint64x2_t a", - "const int n" + "float32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 32 - }, - "r": { - "register": "32(Vd)" + "register": "Vn.4S" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "RSHRN2" + "FRINTA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_n_s16", + "name": "vrndaq_f64", "arguments": [ - "int16x8_t a", - "const int n" + "float64x2_t a" ], "return_type": { - "value": "int8x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "RSHRN" + "FRINTA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_n_s32", + "name": "vrndh_f16", "arguments": [ - "int32x4_t a", - "const int n" + "float16_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Hn" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "RSHRN" + "FRINTZ" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_n_s64", + "name": "vrndi_f16", "arguments": [ - "int64x2_t a", - "const int n" + "float16x4_t a" ], 
"return_type": { - "value": "int32x2_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Vn.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "RSHRN" + "FRINTI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_n_u16", + "name": "vrndi_f32", "arguments": [ - "uint16x8_t a", - "const int n" + "float32x2_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Vn.2S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "RSHRN" + "FRINTI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_n_u32", + "name": "vrndi_f64", "arguments": [ - "uint32x4_t a", - "const int n" + "float64x1_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "RSHRN" + "FRINTI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_n_u64", + "name": "vrndih_f16", "arguments": [ - "uint64x2_t a", - "const int n" + "float16_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Hn" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "RSHRN" + "FRINTI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrq_n_s16", + "name": "vrndiq_f16", "arguments": [ - "int16x8_t a", - "const int n" + "float16x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 16 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINTI" ] ] }, { "SIMD_ISA": "Neon", 
- "name": "vrshrq_n_s32", + "name": "vrndiq_f32", "arguments": [ - "int32x4_t a", - "const int n" + "float32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 32 } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINTI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrq_n_s64", + "name": "vrndiq_f64", "arguments": [ - "int64x2_t a", - "const int n" + "float64x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 64 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINTI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrq_n_s8", + "name": "vrndm_f16", "arguments": [ - "int8x16_t a", - "const int n" + "float16x4_t a" ], "return_type": { - "value": "int8x16_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Vn.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINTM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrq_n_u16", + "name": "vrndm_f32", "arguments": [ - "uint16x8_t a", - "const int n" + "float32x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Vn.2S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSHR" + "FRINTM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrq_n_u32", + "name": "vrndm_f64", "arguments": [ - "uint32x4_t a", - "const int n" + "float64x1_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Dn" } }, "Architectures": 
[ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSHR" + "FRINTM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrq_n_u64", + "name": "vrndmh_f16", "arguments": [ - "uint64x2_t a", - "const int n" + "float16_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Hn" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSHR" + "FRINTM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrq_n_u8", + "name": "vrndmq_f16", "arguments": [ - "uint8x16_t a", - "const int n" + "float16x8_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Vn.8H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSHR" + "FRINTM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrte_f16", + "name": "vrndmq_f32", "arguments": [ - "float16x4_t a" + "float32x4_t a" ], "return_type": { - "value": "float16x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" } }, "Architectures": [ @@ -92047,66 +295422,65 @@ ], "instructions": [ [ - "FRSQRTE" + "FRINTM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrte_f32", + "name": "vrndmq_f64", "arguments": [ - "float32x2_t a" + "float64x2_t a" ], "return_type": { - "value": "float32x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FRSQRTE" + "FRINTM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrte_f64", + "name": "vrndn_f16", "arguments": [ - "float64x1_t a" + "float16x4_t a" ], "return_type": { - "value": "float64x1_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.4H" } }, "Architectures": [ + "A32", "A64" ], 
"instructions": [ [ - "FRSQRTE" + "FRINTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrte_u32", + "name": "vrndn_f32", "arguments": [ - "uint32x2_t a" + "float32x2_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -92114,24 +295488,23 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSQRTE" + "FRINTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrted_f64", + "name": "vrndn_f64", "arguments": [ - "float64_t a" + "float64x1_t a" ], "return_type": { - "value": "float64_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { @@ -92139,17 +295512,18 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRSQRTE" + "FRINTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrteh_f16", + "name": "vrndnh_f16", "arguments": [ "float16_t a" ], @@ -92162,17 +295536,18 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRSQRTE" + "FRINTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrteq_f16", + "name": "vrndnq_f16", "arguments": [ "float16x8_t a" ], @@ -92190,13 +295565,13 @@ ], "instructions": [ [ - "FRSQRTE" + "FRINTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrteq_f32", + "name": "vrndnq_f32", "arguments": [ "float32x4_t a" ], @@ -92209,19 +295584,18 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "FRSQRTE" + "FRINTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrteq_f64", + "name": "vrndnq_f64", "arguments": [ "float64x2_t a" ], @@ -92234,42 +295608,18 @@ } }, "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FRSQRTE" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vrsqrteq_u32", - "arguments": [ - "uint32x4_t a" - ], - "return_type": { - "value": "uint32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - } - }, - "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSQRTE" + "FRINTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrtes_f32", + "name": "vrndns_f32", "arguments": [ 
"float32_t a" ], @@ -92282,20 +295632,20 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRSQRTE" + "FRINTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrts_f16", + "name": "vrndp_f16", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "float16x4_t a" ], "return_type": { "value": "float16x4_t" @@ -92303,9 +295653,6 @@ "Arguments_Preparation": { "a": { "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" } }, "Architectures": [ @@ -92314,16 +295661,15 @@ ], "instructions": [ [ - "FRSQRTS" + "FRINTP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrts_f32", + "name": "vrndp_f32", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "float32x2_t a" ], "return_type": { "value": "float32x2_t" @@ -92331,28 +295677,23 @@ "Arguments_Preparation": { "a": { "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "FRSQRTS" + "FRINTP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrts_f64", + "name": "vrndp_f64", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "float64x1_t a" ], "return_type": { "value": "float64x1_t" @@ -92360,9 +295701,6 @@ "Arguments_Preparation": { "a": { "register": "Dn" - }, - "b": { - "register": "Dm" } }, "Architectures": [ @@ -92370,80 +295708,70 @@ ], "instructions": [ [ - "FRSQRTS" + "FRINTP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrtsd_f64", + "name": "vrndph_f16", "arguments": [ - "float64_t a", - "float64_t b" + "float16_t a" ], "return_type": { - "value": "float64_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "b": { - "register": "Dm" + "register": "Hn" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRSQRTS" + "FRINTP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrtsh_f16", + "name": "vrndpq_f16", "arguments": [ - "float16_t a", - "float16_t b" + "float16x8_t a" ], "return_type": { - "value": "float16_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - 
}, - "b": { - "register": "Hm" + "register": "Vn.8H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRSQRTS" + "FRINTP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrtsq_f16", + "name": "vrndpq_f32", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "float32x4_t a" ], "return_type": { - "value": "float16x8_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vn.4S" } }, "Architectures": [ @@ -92452,450 +295780,344 @@ ], "instructions": [ [ - "FRSQRTS" + "FRINTP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrtsq_f32", + "name": "vrndpq_f64", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "float64x2_t a" ], "return_type": { - "value": "float32x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FRSQRTS" + "FRINTP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrtsq_f64", + "name": "vrndq_f16", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "float16x8_t a" ], "return_type": { - "value": "float64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn.8H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRSQRTS" + "FRINTZ" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrtss_f32", + "name": "vrndq_f32", "arguments": [ - "float32_t a", - "float32_t b" + "float32x4_t a" ], "return_type": { - "value": "float32_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "b": { - "register": "Sm" + "register": "Vn.4S" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRSQRTS" + "FRINTZ" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsra_n_s16", + "name": "vrndq_f64", "arguments": [ - "int16x4_t a", - "int16x4_t b", - "const int n" + "float64x2_t a" ], 
"return_type": { - "value": "int16x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSRA" + "FRINTZ" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsra_n_s32", + "name": "vrndx_f16", "arguments": [ - "int32x2_t a", - "int32x2_t b", - "const int n" + "float16x4_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Vn.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SRSRA" + "FRINTX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsra_n_s64", + "name": "vrndx_f32", "arguments": [ - "int64x1_t a", - "int64x1_t b", - "const int n" + "float32x2_t a" ], "return_type": { - "value": "int64x1_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { - "register": "Dn" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Vn.2S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SRSRA" + "FRINTX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsra_n_s8", + "name": "vrndx_f64", "arguments": [ - "int8x8_t a", - "int8x8_t b", - "const int n" + "float64x1_t a" ], "return_type": { - "value": "int8x8_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.8B" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSRA" + "FRINTX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsra_n_u16", + "name": "vrndxh_f16", "arguments": [ - "uint16x4_t a", - "uint16x4_t b", - "const int n" + "float16_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "float16_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Hn" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSRA" + "FRINTX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsra_n_u32", + "name": "vrndxq_f16", "arguments": [ - "uint32x2_t a", - "uint32x2_t b", - "const int n" + "float16x8_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Vn.8H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSRA" + "FRINTX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsra_n_u64", + "name": "vrndxq_f32", "arguments": [ - "uint64x1_t a", - "uint64x1_t b", - "const int n" + "float32x4_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { - "register": "Dn" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Vn.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSRA" + "FRINTX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsra_n_u8", + "name": "vrndxq_f64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b", - "const int n" + "float64x2_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.8B" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSRA" + "FRINTX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsrad_n_s64", + "name": "vrshl_s16", "arguments": [ - "int64_t a", - "int64_t b", - "const int n" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int64_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": 
"Vn.4H" }, "b": { - "register": "Dn" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SRSRA" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsrad_n_u64", + "name": "vrshl_s32", "arguments": [ - "uint64_t a", - "uint64_t b", - "const int n" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "uint64_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.2S" }, "b": { - "register": "Dn" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "URSRA" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsraq_n_s16", + "name": "vrshl_s64", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "const int n" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Dn" }, "b": { - "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Dm" } }, "Architectures": [ @@ -92905,31 +296127,26 @@ ], "instructions": [ [ - "SRSRA" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsraq_n_s32", + "name": "vrshl_s8", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "const int n" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.8B" }, "b": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Vm.8B" } }, "Architectures": [ @@ -92939,31 +296156,26 @@ ], "instructions": [ [ - "SRSRA" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsraq_n_s64", + "name": "vrshl_u16", "arguments": [ - "int64x2_t a", - "int64x2_t b", - "const int n" + "uint16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { 
"a": { - "register": "Vd.2D" + "register": "Vn.4H" }, "b": { - "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Vm.4H" } }, "Architectures": [ @@ -92973,31 +296185,26 @@ ], "instructions": [ [ - "SRSRA" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsraq_n_s8", + "name": "vrshl_u32", "arguments": [ - "int8x16_t a", - "int8x16_t b", - "const int n" + "uint32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vn.2S" }, "b": { - "register": "Vn.16B" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Vm.2S" } }, "Architectures": [ @@ -93007,31 +296214,26 @@ ], "instructions": [ [ - "SRSRA" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsraq_n_u16", + "name": "vrshl_u64", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "const int n" + "uint64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Dn" }, "b": { - "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Dm" } }, "Architectures": [ @@ -93041,31 +296243,26 @@ ], "instructions": [ [ - "URSRA" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsraq_n_u32", + "name": "vrshl_u8", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "const int n" + "uint8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.8B" }, "b": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Vm.8B" } }, "Architectures": [ @@ -93075,88 +296272,73 @@ ], "instructions": [ [ - "URSRA" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsraq_n_u64", + "name": "vrshld_s64", "arguments": [ - "uint64x2_t a", - "uint64x2_t b", - "const int n" + "int64_t a", + "int64_t b" ], "return_type": { - 
"value": "uint64x2_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Dn" }, "b": { - "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSRA" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsraq_n_u8", + "name": "vrshld_u64", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", - "const int n" + "uint64_t a", + "int64_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Dn" }, "b": { - "register": "Vn.16B" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSRA" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_high_s16", + "name": "vrshlq_s16", "arguments": [ - "int8x8_t r", "int16x8_t a", "int16x8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -93164,30 +296346,28 @@ }, "b": { "register": "Vm.8H" - }, - "r": { - "register": "Vd.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "RSUBHN2" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_high_s32", + "name": "vrshlq_s32", "arguments": [ - "int16x4_t r", "int32x4_t a", "int32x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -93195,30 +296375,28 @@ }, "b": { "register": "Vm.4S" - }, - "r": { - "register": "Vd.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "RSUBHN2" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_high_s64", + "name": "vrshlq_s64", "arguments": [ - "int32x2_t r", "int64x2_t a", "int64x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { @@ -93226,129 +296404,122 @@ }, "b": { "register": "Vm.2D" - }, - "r": { - 
"register": "Vd.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "RSUBHN2" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_high_u16", + "name": "vrshlq_s8", "arguments": [ - "uint8x8_t r", - "uint16x8_t a", - "uint16x8_t b" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, "b": { - "register": "Vm.8H" - }, - "r": { - "register": "Vd.8B" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "RSUBHN2" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_high_u32", + "name": "vrshlq_u16", "arguments": [ - "uint16x4_t r", - "uint32x4_t a", - "uint32x4_t b" + "uint16x8_t a", + "int16x8_t b" ], "return_type": { "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8H" }, "b": { - "register": "Vm.4S" - }, - "r": { - "register": "Vd.4H" + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "RSUBHN2" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_high_u64", + "name": "vrshlq_u32", "arguments": [ - "uint32x2_t r", - "uint64x2_t a", - "uint64x2_t b" + "uint32x4_t a", + "int32x4_t b" ], "return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4S" }, "b": { - "register": "Vm.2D" - }, - "r": { - "register": "Vd.2S" + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "RSUBHN2" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_s16", + "name": "vrshlq_u64", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "uint64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "int8x8_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2D" }, "b": { - "register": "Vm.8H" + "register": "Vm.2D" } }, "Architectures": [ @@ 
-93358,26 +296529,26 @@ ], "instructions": [ [ - "RSUBHN" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_s32", + "name": "vrshlq_u8", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "uint8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int16x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.16B" }, "b": { - "register": "Vm.4S" + "register": "Vm.16B" } }, "Architectures": [ @@ -93387,26 +296558,27 @@ ], "instructions": [ [ - "RSUBHN" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_s64", + "name": "vrshr_n_s16", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "int16x4_t a", + "const int n" ], "return_type": { - "value": "int32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4H" }, - "b": { - "register": "Vm.2D" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -93416,26 +296588,27 @@ ], "instructions": [ [ - "RSUBHN" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_u16", + "name": "vrshr_n_s32", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "int32x2_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2S" }, - "b": { - "register": "Vm.8H" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -93445,26 +296618,27 @@ ], "instructions": [ [ - "RSUBHN" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_u32", + "name": "vrshr_n_s64", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int64x1_t a", + "const int n" ], "return_type": { - "value": "uint16x4_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Dn" }, - "b": { - "register": "Vm.4S" + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -93474,26 +296648,27 @@ ], "instructions": [ [ - "RSUBHN" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vrsubhn_u64", + "name": "vrshr_n_s8", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "int8x8_t a", + "const int n" ], "return_type": { - "value": "uint32x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.8B" }, - "b": { - "register": "Vm.2D" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -93503,31 +296678,27 @@ ], "instructions": [ [ - "RSUBHN" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_f16", + "name": "vrshr_n_u16", "arguments": [ - "float16_t a", - "float16x4_t v", - "const int lane" + "uint16x4_t a", + "const int n" ], "return_type": { - "value": "float16x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "VnH" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.4H" }, - "v": { - "register": "Vd.4H" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -93537,31 +296708,27 @@ ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_f32", + "name": "vrshr_n_u32", "arguments": [ - "float32_t a", - "float32x2_t v", - "const int lane" + "uint32x2_t a", + "const int n" ], "return_type": { - "value": "float32x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Vn.2S" }, - "v": { - "register": "Vd.2S" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -93571,63 +296738,57 @@ ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_f64", + "name": "vrshr_n_u64", "arguments": [ - "float64_t a", - "float64x1_t v", - "const int lane" + "uint64x1_t a", + "const int n" ], "return_type": { - "value": "float64x1_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 0 + "register": "Dn" }, - "v": { - "register": "Vd.1D" + "n": { + "minimum": 1, + "maximum": 64 } }, 
"Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_p16", + "name": "vrshr_n_u8", "arguments": [ - "poly16_t a", - "poly16x4_t v", - "const int lane" + "uint8x8_t a", + "const int n" ], "return_type": { - "value": "poly16x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.8B" }, - "v": { - "register": "Vd.4H" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -93637,302 +296798,275 @@ ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_p64", + "name": "vrshrd_n_s64", "arguments": [ - "poly64_t a", - "poly64x1_t v", - "const int lane" + "int64_t a", + "const int n" ], "return_type": { - "value": "poly64x1_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 0 + "register": "Dn" }, - "v": { - "register": "Vd.1D" + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "MOV" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_p8", + "name": "vrshrd_n_u64", "arguments": [ - "poly8_t a", - "poly8x8_t v", - "const int lane" + "uint64_t a", + "const int n" ], "return_type": { - "value": "poly8x8_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Dn" }, - "v": { - "register": "Vd.8B" + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_s16", + "name": "vrshrn_high_n_s16", "arguments": [ - "int16_t a", - "int16x4_t v", - "const int lane" + "int8x8_t r", + "int16x8_t a", + "const int n" ], "return_type": { - "value": "int16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": 
"Rn" + "register": "Vn.8H" }, - "lane": { - "minimum": 0, - "maximum": 3 + "n": { + "minimum": 1, + "maximum": 8 }, - "v": { - "register": "Vd.4H" + "r": { + "register": "Vd.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOV" + "RSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_s32", + "name": "vrshrn_high_n_s32", "arguments": [ - "int32_t a", - "int32x2_t v", - "const int lane" + "int16x4_t r", + "int32x4_t a", + "const int n" ], "return_type": { - "value": "int32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" + "register": "Vn.4S" }, - "lane": { - "minimum": 0, - "maximum": 1 + "n": { + "minimum": 1, + "maximum": 16 }, - "v": { - "register": "Vd.2S" + "r": { + "register": "Vd.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOV" + "RSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_s64", + "name": "vrshrn_high_n_s64", "arguments": [ - "int64_t a", - "int64x1_t v", - "const int lane" + "int32x2_t r", + "int64x2_t a", + "const int n" ], "return_type": { - "value": "int64x1_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" + "register": "Vn.2D" }, - "lane": { - "minimum": 0, - "maximum": 0 + "n": { + "minimum": 1, + "maximum": 32 }, - "v": { - "register": "Vd.1D" + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOV" + "RSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_s8", + "name": "vrshrn_high_n_u16", "arguments": [ - "int8_t a", - "int8x8_t v", - "const int lane" + "uint8x8_t r", + "uint16x8_t a", + "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" + "register": "Vn.8H" }, - "lane": { - "minimum": 0, - "maximum": 7 + "n": { + "minimum": 1, + "maximum": 8 }, - "v": { + "r": { "register": "Vd.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOV" + 
"RSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_u16", + "name": "vrshrn_high_n_u32", "arguments": [ - "uint16_t a", - "uint16x4_t v", - "const int lane" + "uint16x4_t r", + "uint32x4_t a", + "const int n" ], "return_type": { - "value": "uint16x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" + "register": "Vn.4S" }, - "lane": { - "minimum": 0, - "maximum": 3 + "n": { + "minimum": 1, + "maximum": 16 }, - "v": { + "r": { "register": "Vd.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOV" + "RSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_u32", + "name": "vrshrn_high_n_u64", "arguments": [ - "uint32_t a", - "uint32x2_t v", - "const int lane" + "uint32x2_t r", + "uint64x2_t a", + "const int n" ], "return_type": { - "value": "uint32x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" + "register": "Vn.2D" }, - "lane": { - "minimum": 0, - "maximum": 1 + "n": { + "minimum": 1, + "maximum": 32 }, - "v": { - "register": "Vd.2S" + "r": { + "register": "32(Vd)" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOV" + "RSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_u64", + "name": "vrshrn_n_s16", "arguments": [ - "uint64_t a", - "uint64x1_t v", - "const int lane" + "int16x8_t a", + "const int n" ], "return_type": { - "value": "uint64x1_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 0 + "register": "Vn.8H" }, - "v": { - "register": "Vd.1D" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -93942,31 +297076,27 @@ ], "instructions": [ [ - "MOV" + "RSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_u8", + "name": "vrshrn_n_s32", "arguments": [ - "uint8_t a", - "uint8x8_t v", - "const int lane" + "int32x4_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - 
"register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Vn.4S" }, - "v": { - "register": "Vd.8B" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -93976,31 +297106,27 @@ ], "instructions": [ [ - "MOV" + "RSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_f16", + "name": "vrshrn_n_s64", "arguments": [ - "float16_t a", - "float16x8_t v", - "const int lane" + "int64x2_t a", + "const int n" ], "return_type": { - "value": "float16x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "VnH" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Vn.2D" }, - "v": { - "register": "Vd.8H" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -94010,31 +297136,27 @@ ], "instructions": [ [ - "MOV" + "RSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_f32", + "name": "vrshrn_n_u16", "arguments": [ - "float32_t a", - "float32x4_t v", - "const int lane" + "uint16x8_t a", + "const int n" ], "return_type": { - "value": "float32x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.8H" }, - "v": { - "register": "Vd.4S" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -94044,63 +297166,57 @@ ], "instructions": [ [ - "MOV" + "RSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_f64", + "name": "vrshrn_n_u32", "arguments": [ - "float64_t a", - "float64x2_t v", - "const int lane" + "uint32x4_t a", + "const int n" ], "return_type": { - "value": "float64x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Vn.4S" }, - "v": { - "register": "Vd.2D" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MOV" + "RSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_p16", + "name": "vrshrn_n_u64", "arguments": [ - 
"poly16_t a", - "poly16x8_t v", - "const int lane" + "uint64x2_t a", + "const int n" ], "return_type": { - "value": "poly16x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Vn.2D" }, - "v": { - "register": "Vd.8H" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -94110,64 +297226,57 @@ ], "instructions": [ [ - "MOV" + "RSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_p64", + "name": "vrshrq_n_s16", "arguments": [ - "poly64_t a", - "poly64x2_t v", - "const int lane" + "int16x8_t a", + "const int n" ], "return_type": { - "value": "poly64x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Vn.8H" }, - "v": { - "register": "Vd.2D" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "MOV" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_p8", + "name": "vrshrq_n_s32", "arguments": [ - "poly8_t a", - "poly8x16_t v", - "const int lane" + "int32x4_t a", + "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 15 + "register": "Vn.4S" }, - "v": { - "register": "Vd.16B" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -94177,31 +297286,27 @@ ], "instructions": [ [ - "MOV" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_s16", + "name": "vrshrq_n_s64", "arguments": [ - "int16_t a", - "int16x8_t v", - "const int lane" + "int64x2_t a", + "const int n" ], "return_type": { - "value": "int16x8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Vn.2D" }, - "v": { - "register": "Vd.8H" + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": 
[ @@ -94211,31 +297316,27 @@ ], "instructions": [ [ - "MOV" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_s32", + "name": "vrshrq_n_s8", "arguments": [ - "int32_t a", - "int32x4_t v", - "const int lane" + "int8x16_t a", + "const int n" ], "return_type": { - "value": "int32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.16B" }, - "v": { - "register": "Vd.4S" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -94245,31 +297346,27 @@ ], "instructions": [ [ - "MOV" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_s64", + "name": "vrshrq_n_u16", "arguments": [ - "int64_t a", - "int64x2_t v", - "const int lane" + "uint16x8_t a", + "const int n" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Vn.8H" }, - "v": { - "register": "Vd.2D" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -94279,31 +297376,27 @@ ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_s8", + "name": "vrshrq_n_u32", "arguments": [ - "int8_t a", - "int8x16_t v", - "const int lane" + "uint32x4_t a", + "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 15 + "register": "Vn.4S" }, - "v": { - "register": "Vd.16B" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -94313,31 +297406,27 @@ ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_u16", + "name": "vrshrq_n_u64", "arguments": [ - "uint16_t a", - "uint16x8_t v", - "const int lane" + "uint64x2_t a", + "const int n" ], "return_type": { - "value": "uint16x8_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": 
"Rn" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Vn.2D" }, - "v": { - "register": "Vd.8H" + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -94347,31 +297436,27 @@ ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_u32", + "name": "vrshrq_n_u8", "arguments": [ - "uint32_t a", - "uint32x4_t v", - "const int lane" + "uint8x16_t a", + "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.16B" }, - "v": { - "register": "Vd.4S" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -94381,65 +297466,46 @@ ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_u64", + "name": "vrsqrte_f16", "arguments": [ - "uint64_t a", - "uint64x2_t v", - "const int lane" + "float16x4_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vd.2D" + "register": "Vn.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "MOV" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_u8", + "name": "vrsqrte_f32", "arguments": [ - "uint8_t a", - "uint8x16_t v", - "const int lane" + "float32x2_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 15 - }, - "v": { - "register": "Vd.16B" + "register": "Vn.2S" } }, "Architectures": [ @@ -94449,150 +297515,116 @@ ], "instructions": [ [ - "MOV" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha1cq_u32", + "name": "vrsqrte_f64", "arguments": [ - "uint32x4_t hash_abcd", - "uint32_t hash_e", - "uint32x4_t wk" + "float64x1_t a" ], "return_type": { - "value": "uint32x4_t" + 
"value": "float64x1_t" }, "Arguments_Preparation": { - "hash_abcd": { - "register": "Qd" - }, - "hash_e": { - "register": "Sn" - }, - "wk": { - "register": "Vm.4S" + "a": { + "register": "Dn" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SHA1C" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha1h_u32", + "name": "vrsqrte_u32", "arguments": [ - "uint32_t hash_e" + "uint32x2_t a" ], "return_type": { - "value": "uint32_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { - "hash_e": { - "register": "Sn" + "a": { + "register": "Vn.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "SHA1H" + "URSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha1mq_u32", + "name": "vrsqrted_f64", "arguments": [ - "uint32x4_t hash_abcd", - "uint32_t hash_e", - "uint32x4_t wk" + "float64_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float64_t" }, "Arguments_Preparation": { - "hash_abcd": { - "register": "Qd" - }, - "hash_e": { - "register": "Sn" - }, - "wk": { - "register": "Vm.4S" + "a": { + "register": "Dn" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SHA1M" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha1pq_u32", + "name": "vrsqrteh_f16", "arguments": [ - "uint32x4_t hash_abcd", - "uint32_t hash_e", - "uint32x4_t wk" + "float16_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { - "hash_abcd": { - "register": "Qd" - }, - "hash_e": { - "register": "Sn" - }, - "wk": { - "register": "Vm.4S" + "a": { + "register": "Hn" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SHA1P" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha1su0q_u32", + "name": "vrsqrteq_f16", "arguments": [ - "uint32x4_t w0_3", - "uint32x4_t w4_7", - "uint32x4_t w8_11" + "float16x8_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { - "w0_3": { - "register": "Vd.4S" - }, - "w4_7": { - "register": "Vn.4S" - 
}, - "w8_11": { - "register": "Vm.4S" + "a": { + "register": "Vn.8H" } }, "Architectures": [ @@ -94601,150 +297633,122 @@ ], "instructions": [ [ - "SHA1SU0" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha1su1q_u32", + "name": "vrsqrteq_f32", "arguments": [ - "uint32x4_t tw0_3", - "uint32x4_t w12_15" + "float32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { - "tw0_3": { - "register": "Vd.4S" - }, - "w12_15": { + "a": { "register": "Vn.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "SHA1SU1" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha256h2q_u32", + "name": "vrsqrteq_f64", "arguments": [ - "uint32x4_t hash_efgh", - "uint32x4_t hash_abcd", - "uint32x4_t wk" + "float64x2_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { - "hash_abcd": { - "register": "Qn" - }, - "hash_efgh": { - "register": "Qd" - }, - "wk": { - "register": "Vm.4S" + "a": { + "register": "Vn.2D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SHA256H2" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha256hq_u32", + "name": "vrsqrteq_u32", "arguments": [ - "uint32x4_t hash_abcd", - "uint32x4_t hash_efgh", - "uint32x4_t wk" + "uint32x4_t a" ], "return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { - "hash_abcd": { - "register": "Qd" - }, - "hash_efgh": { - "register": "Qn" - }, - "wk": { - "register": "Vm.4S" + "a": { + "register": "Vn.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "SHA256H" + "URSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha256su0q_u32", + "name": "vrsqrtes_f32", "arguments": [ - "uint32x4_t w0_3", - "uint32x4_t w4_7" + "float32_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float32_t" }, "Arguments_Preparation": { - "w0_3": { - "register": "Vd.4S" - }, - "w4_7": { - "register": "Vn.4S" + "a": { + "register": "Sn" } }, "Architectures": [ - "A32", 
"A64" ], "instructions": [ [ - "SHA256SU0" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha256su1q_u32", + "name": "vrsqrts_f16", "arguments": [ - "uint32x4_t tw0_3", - "uint32x4_t w8_11", - "uint32x4_t w12_15" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "float16x4_t" }, "Arguments_Preparation": { - "tw0_3": { - "register": "Vd.4S" - }, - "w12_15": { - "register": "Vm.4S" + "a": { + "register": "Vn.4H" }, - "w8_11": { - "register": "Vn.4S" + "b": { + "register": "Vm.4H" } }, "Architectures": [ @@ -94753,84 +297757,82 @@ ], "instructions": [ [ - "SHA256SU1" + "FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha512h2q_u64", + "name": "vrsqrts_f32", "arguments": [ - "uint64x2_t sum_ab", - "uint64x2_t hash_c_", - "uint64x2_t hash_ab" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "float32x2_t" }, "Arguments_Preparation": { - "hash_ab": {}, - "hash_c_": { - "register": "Qn" + "a": { + "register": "Vn.2S" }, - "sum_ab": { - "register": "Qd" + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHA512H2" + "FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha512hq_u64", + "name": "vrsqrts_f64", "arguments": [ - "uint64x2_t hash_ed", - "uint64x2_t hash_gf", - "uint64x2_t kwh_kwh2" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { - "hash_ed": { - "register": "Qd" - }, - "hash_gf": { - "register": "Qn" + "a": { + "register": "Dn" }, - "kwh_kwh2": {} + "b": { + "register": "Dm" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "SHA512H" + "FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha512su0q_u64", + "name": "vrsqrtsd_f64", "arguments": [ - "uint64x2_t w0_1", - "uint64x2_t w2_" + "float64_t a", + "float64_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "float64_t" }, "Arguments_Preparation": { - "w0_1": { - 
"register": "Vd.2D" + "a": { + "register": "Dn" }, - "w2_": { - "register": "Vn.2D" + "b": { + "register": "Dm" } }, "Architectures": [ @@ -94838,86 +297840,81 @@ ], "instructions": [ [ - "SHA512SU0" + "FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha512su1q_u64", + "name": "vrsqrtsh_f16", "arguments": [ - "uint64x2_t s01_s02", - "uint64x2_t w14_15", - "uint64x2_t w9_10" + "float16_t a", + "float16_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "float16_t" }, "Arguments_Preparation": { - "s01_s02": { - "register": "Vd.2D" - }, - "w14_15": { - "register": "Vn.2D" + "a": { + "register": "Hn" }, - "w9_10": {} + "b": { + "register": "Hm" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "SHA512SU1" + "FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_n_s16", + "name": "vrsqrtsq_f16", "arguments": [ - "int16x4_t a", - "const int n" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "int16x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - "maximum": 15 + "b": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SHL" + "FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_n_s32", + "name": "vrsqrtsq_f32", "arguments": [ - "int32x2_t a", - "const int n" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int32x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 31 + "b": { + "register": "Vm.4S" } }, "Architectures": [ @@ -94927,87 +297924,85 @@ ], "instructions": [ [ - "SHL" + "FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_n_s64", + "name": "vrsqrtsq_f64", "arguments": [ - "int64x1_t a", - "const int n" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int64x1_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + 
"register": "Vn.2D" }, - "n": { - "minimum": 0, - "maximum": 63 + "b": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SHL" + "FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_n_s8", + "name": "vrsqrtss_f32", "arguments": [ - "int8x8_t a", - "const int n" + "float32_t a", + "float32_t b" ], "return_type": { - "value": "int8x8_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Sn" }, - "n": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Sm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SHL" + "FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_n_u16", + "name": "vrsra_n_s16", "arguments": [ - "uint16x4_t a", + "int16x4_t a", + "int16x4_t b", "const int n" ], "return_type": { - "value": "uint16x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.4H" + }, + "b": { "register": "Vn.4H" }, "n": { - "minimum": 0, - "maximum": 15 + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -95017,27 +298012,31 @@ ], "instructions": [ [ - "SHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_n_u32", + "name": "vrsra_n_s32", "arguments": [ - "uint32x2_t a", + "int32x2_t a", + "int32x2_t b", "const int n" ], "return_type": { - "value": "uint32x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.2S" + }, + "b": { "register": "Vn.2S" }, "n": { - "minimum": 0, - "maximum": 31 + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -95047,27 +298046,31 @@ ], "instructions": [ [ - "SHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_n_u64", + "name": "vrsra_n_s64", "arguments": [ - "uint64x1_t a", + "int64x1_t a", + "int64x1_t b", "const int n" ], "return_type": { - "value": "uint64x1_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { + "register": "Dd" + }, + "b": { "register": "Dn" }, "n": { - "minimum": 0, - "maximum": 63 + "minimum": 1, + 
"maximum": 64 } }, "Architectures": [ @@ -95077,27 +298080,31 @@ ], "instructions": [ [ - "SHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_n_u8", + "name": "vrsra_n_s8", "arguments": [ - "uint8x8_t a", + "int8x8_t a", + "int8x8_t b", "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.8B" + }, + "b": { "register": "Vn.8B" }, "n": { - "minimum": 0, - "maximum": 7 + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -95107,26 +298114,31 @@ ], "instructions": [ [ - "SHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_s16", + "name": "vrsra_n_u16", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "uint16x4_t a", + "uint16x4_t b", + "const int n" ], "return_type": { - "value": "int16x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.4H" }, "b": { - "register": "Vm.4H" + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -95136,26 +298148,31 @@ ], "instructions": [ [ - "SSHL" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_s32", + "name": "vrsra_n_u32", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "uint32x2_t a", + "uint32x2_t b", + "const int n" ], "return_type": { - "value": "int32x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.2S" }, "b": { - "register": "Vm.2S" + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -95165,26 +298182,31 @@ ], "instructions": [ [ - "SSHL" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_s64", + "name": "vrsra_n_u64", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "uint64x1_t a", + "uint64x1_t b", + "const int n" ], "return_type": { - "value": "int64x1_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Dd" }, "b": { - "register": "Dm" + "register": "Dn" + }, + "n": { + 
"minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -95194,26 +298216,31 @@ ], "instructions": [ [ - "SSHL" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_s8", + "name": "vrsra_n_u8", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint8x8_t a", + "uint8x8_t b", + "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vd.8B" }, "b": { - "register": "Vm.8B" + "register": "Vn.8B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -95223,84 +298250,95 @@ ], "instructions": [ [ - "SSHL" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_u16", + "name": "vrsrad_n_s64", "arguments": [ - "uint16x4_t a", - "int16x4_t b" + "int64_t a", + "int64_t b", + "const int n" ], "return_type": { - "value": "uint16x4_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Dd" }, "b": { - "register": "Vm.4H" + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_u32", + "name": "vrsrad_n_u64", "arguments": [ - "uint32x2_t a", - "int32x2_t b" + "uint64_t a", + "uint64_t b", + "const int n" ], "return_type": { - "value": "uint32x2_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Dd" }, "b": { - "register": "Vm.2S" + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USHL" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_u64", + "name": "vrsraq_n_s16", "arguments": [ - "uint64x1_t a", - "int64x1_t b" + "int16x8_t a", + "int16x8_t b", + "const int n" ], "return_type": { - "value": "uint64x1_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.8H" }, "b": { - "register": "Dm" + "register": 
"Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -95310,26 +298348,31 @@ ], "instructions": [ [ - "USHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_u8", + "name": "vrsraq_n_s32", "arguments": [ - "uint8x8_t a", - "int8x8_t b" + "int32x4_t a", + "int32x4_t b", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vd.4S" }, "b": { - "register": "Vm.8B" + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -95339,193 +298382,234 @@ ], "instructions": [ [ - "USHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshld_n_s64", + "name": "vrsraq_n_s64", "arguments": [ - "int64_t a", + "int64x2_t a", + "int64x2_t b", "const int n" ], "return_type": { - "value": "int64_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2D" }, "n": { - "minimum": 0, - "maximum": 63 + "minimum": 1, + "maximum": 64 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshld_n_u64", + "name": "vrsraq_n_s8", "arguments": [ - "uint64_t a", + "int8x16_t a", + "int8x16_t b", "const int n" ], "return_type": { - "value": "uint64_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" }, "n": { - "minimum": 0, - "maximum": 63 + "minimum": 1, + "maximum": 8 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshld_s64", + "name": "vrsraq_n_u16", "arguments": [ - "int64_t a", - "int64_t b" + "uint16x8_t a", + "uint16x8_t b", + "const int n" ], "return_type": { - "value": "int64_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.8H" }, "b": { - 
"register": "Dm" + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSHL" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshld_u64", + "name": "vrsraq_n_u32", "arguments": [ - "uint64_t a", - "int64_t b" + "uint32x4_t a", + "uint32x4_t b", + "const int n" ], "return_type": { - "value": "uint64_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" }, "b": { - "register": "Dm" + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USHL" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_high_n_s16", + "name": "vrsraq_n_u64", "arguments": [ - "int16x8_t a", + "uint64x2_t a", + "uint64x2_t b", "const int n" ], "return_type": { - "value": "int32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2D" }, "n": { - "minimum": 0, - "maximum": 16 + "minimum": 1, + "maximum": 64 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSHLL2" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_high_n_s32", + "name": "vrsraq_n_u8", "arguments": [ - "int32x4_t a", + "uint8x16_t a", + "uint8x16_t b", "const int n" ], "return_type": { - "value": "int64x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" }, "n": { - "minimum": 0, - "maximum": 32 + "minimum": 1, + "maximum": 8 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSHLL2" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_high_n_s8", + "name": "vrsubhn_high_s16", "arguments": [ - "int8x16_t a", - "const int n" + "int8x8_t r", + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { 
- "register": "Vn.16B" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - "maximum": 8 + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -95533,27 +298617,30 @@ ], "instructions": [ [ - "SSHLL2" + "RSUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_high_n_u16", + "name": "vrsubhn_high_s32", "arguments": [ - "uint16x8_t a", - "const int n" + "int16x4_t r", + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 16 + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -95561,27 +298648,30 @@ ], "instructions": [ [ - "USHLL2" + "RSUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_high_n_u32", + "name": "vrsubhn_high_s64", "arguments": [ - "uint32x4_t a", - "const int n" + "int32x2_t r", + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2D" }, - "n": { - "minimum": 0, - "maximum": 32 + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -95589,27 +298679,30 @@ ], "instructions": [ [ - "USHLL2" + "RSUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_high_n_u8", + "name": "vrsubhn_high_u16", "arguments": [ - "uint8x16_t a", - "const int n" + "uint8x8_t r", + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - "maximum": 8 + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -95617,87 +298710,88 @@ ], "instructions": [ [ - "USHLL2" + "RSUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_n_s16", + "name": "vrsubhn_high_u32", "arguments": [ - "int16x4_t 
a", - "const int n" + "uint16x4_t r", + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 16 + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSHLL" + "RSUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_n_s32", + "name": "vrsubhn_high_u64", "arguments": [ - "int32x2_t a", - "const int n" + "uint32x2_t r", + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.2D" }, - "n": { - "minimum": 0, - "maximum": 32 + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSHLL" + "RSUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_n_s8", + "name": "vrsubhn_s16", "arguments": [ - "int8x8_t a", - "const int n" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - "maximum": 8 + "b": { + "register": "Vm.8H" } }, "Architectures": [ @@ -95707,27 +298801,26 @@ ], "instructions": [ [ - "SSHLL" + "RSUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_n_u16", + "name": "vrsubhn_s32", "arguments": [ - "uint16x4_t a", - "const int n" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 16 + "b": { + "register": "Vm.4S" } }, "Architectures": [ @@ -95737,27 +298830,26 @@ ], "instructions": [ [ - "USHLL" + "RSUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_n_u32", + "name": "vrsubhn_s64", 
"arguments": [ - "uint32x2_t a", - "const int n" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.2D" }, - "n": { - "minimum": 0, - "maximum": 32 + "b": { + "register": "Vm.2D" } }, "Architectures": [ @@ -95767,27 +298859,26 @@ ], "instructions": [ [ - "USHLL" + "RSUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_n_u8", + "name": "vrsubhn_u16", "arguments": [ - "uint8x8_t a", - "const int n" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - "maximum": 8 + "b": { + "register": "Vm.8H" } }, "Architectures": [ @@ -95797,27 +298888,26 @@ ], "instructions": [ [ - "USHLL" + "RSUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_n_s16", + "name": "vrsubhn_u32", "arguments": [ - "int16x8_t a", - "const int n" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 15 + "b": { + "register": "Vm.4S" } }, "Architectures": [ @@ -95827,27 +298917,26 @@ ], "instructions": [ [ - "SHL" + "RSUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_n_s32", + "name": "vrsubhn_u64", "arguments": [ - "int32x4_t a", - "const int n" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2D" }, - "n": { - "minimum": 0, - "maximum": 31 + "b": { + "register": "Vm.2D" } }, "Architectures": [ @@ -95857,177 +298946,166 @@ ], "instructions": [ [ - "SHL" + "RSUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_n_s64", + "name": "vscale_f16", "arguments": [ - "int64x2_t a", - "const int n" + "float16x4_t vn", + "int16x4_t vm" 
], "return_type": { - "value": "int64x2_t" + "value": "float16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "vm": { + "register": "Vm.4H" }, - "n": { - "minimum": 0, - "maximum": 63 + "vn": { + "register": "Vn.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SHL" + "FSCALE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_n_s8", + "name": "vscale_f32", "arguments": [ - "int8x16_t a", - "const int n" + "float32x2_t vn", + "int32x2_t vm" ], "return_type": { - "value": "int8x16_t" + "value": "float32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "vm": { + "register": "Vm.2S" }, - "n": { - "minimum": 0, - "maximum": 7 + "vn": { + "register": "Vn.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SHL" + "FSCALE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_n_u16", + "name": "vscaleq_f16", "arguments": [ - "uint16x8_t a", - "const int n" + "float16x8_t vn", + "int16x8_t vm" ], "return_type": { - "value": "uint16x8_t" + "value": "float16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "vm": { + "register": "Vm.8H" }, - "n": { - "minimum": 0, - "maximum": 15 + "vn": { + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SHL" + "FSCALE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_n_u32", + "name": "vscaleq_f32", "arguments": [ - "uint32x4_t a", - "const int n" + "float32x4_t vn", + "int32x4_t vm" ], "return_type": { - "value": "uint32x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "vm": { + "register": "Vm.4S" }, - "n": { - "minimum": 0, - "maximum": 31 + "vn": { + "register": "Vn.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SHL" + "FSCALE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_n_u64", + "name": "vscaleq_f64", "arguments": [ - "uint64x2_t a", - "const int n" + "float64x2_t vn", + "int64x2_t vm" ], "return_type": { - "value": 
"uint64x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "vm": { + "register": "Vm.2D" }, - "n": { - "minimum": 0, - "maximum": 63 + "vn": { + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SHL" + "FSCALE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_n_u8", + "name": "vset_lane_f16", "arguments": [ - "uint8x16_t a", - "const int n" + "float16_t a", + "float16x4_t v", + "const int lane" ], "return_type": { - "value": "uint8x16_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "VnH" }, - "n": { + "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 + }, + "v": { + "register": "Vd.4H" } }, "Architectures": [ @@ -96037,26 +299115,31 @@ ], "instructions": [ [ - "SHL" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_s16", + "name": "vset_lane_f32", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "float32_t a", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Rn" }, - "b": { - "register": "Vm.8H" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vd.2S" } }, "Architectures": [ @@ -96066,55 +299149,63 @@ ], "instructions": [ [ - "SSHL" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_s32", + "name": "vset_lane_f64", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "float64_t a", + "float64x1_t v", + "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Rn" }, - "b": { - "register": "Vm.4S" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vd.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSHL" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_s64", + "name": "vset_lane_p16", "arguments": [ - "int64x2_t a", - 
"int64x2_t b" + "poly16_t a", + "poly16x4_t v", + "const int lane" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Rn" }, - "b": { - "register": "Vm.2D" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vd.4H" } }, "Architectures": [ @@ -96124,55 +299215,64 @@ ], "instructions": [ [ - "SSHL" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_s8", + "name": "vset_lane_p64", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "poly64_t a", + "poly64x1_t v", + "const int lane" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Rn" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vd.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SSHL" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_u16", + "name": "vset_lane_p8", "arguments": [ - "uint16x8_t a", - "int16x8_t b" + "poly8_t a", + "poly8x8_t v", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Rn" }, - "b": { - "register": "Vm.8H" + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vd.8B" } }, "Architectures": [ @@ -96182,26 +299282,31 @@ ], "instructions": [ [ - "USHL" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_u32", + "name": "vset_lane_s16", "arguments": [ - "uint32x4_t a", - "int32x4_t b" + "int16_t a", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Rn" }, - "b": { - "register": "Vm.4S" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vd.4H" } }, "Architectures": [ @@ -96211,26 +299316,31 @@ ], "instructions": [ [ - "USHL" + "MOV" ] ] 
}, { "SIMD_ISA": "Neon", - "name": "vshlq_u64", + "name": "vset_lane_s32", "arguments": [ - "uint64x2_t a", - "int64x2_t b" + "int32_t a", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Rn" }, - "b": { - "register": "Vm.2D" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vd.2S" } }, "Architectures": [ @@ -96240,26 +299350,31 @@ ], "instructions": [ [ - "USHL" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_u8", + "name": "vset_lane_s64", "arguments": [ - "uint8x16_t a", - "int8x16_t b" + "int64_t a", + "int64x1_t v", + "const int lane" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Rn" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vd.1D" } }, "Architectures": [ @@ -96269,27 +299384,31 @@ ], "instructions": [ [ - "USHL" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshr_n_s16", + "name": "vset_lane_s8", "arguments": [ - "int16x4_t a", - "const int n" + "int8_t a", + "int8x8_t v", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 16 + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vd.8B" } }, "Architectures": [ @@ -96299,27 +299418,31 @@ ], "instructions": [ [ - "SSHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshr_n_s32", + "name": "vset_lane_u16", "arguments": [ - "int32x2_t a", - "const int n" + "uint16_t a", + "uint16x4_t v", + "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 32 + "lane": { + "minimum": 0, + "maximum": 3 
+ }, + "v": { + "register": "Vd.4H" } }, "Architectures": [ @@ -96329,27 +299452,31 @@ ], "instructions": [ [ - "SSHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshr_n_s64", + "name": "vset_lane_u32", "arguments": [ - "int64x1_t a", - "const int n" + "uint32_t a", + "uint32x2_t v", + "const int lane" ], "return_type": { - "value": "int64x1_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 64 + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vd.2S" } }, "Architectures": [ @@ -96359,27 +299486,31 @@ ], "instructions": [ [ - "SSHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshr_n_s8", + "name": "vset_lane_u64", "arguments": [ - "int8x8_t a", - "const int n" + "uint64_t a", + "uint64x1_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 8 + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vd.1D" } }, "Architectures": [ @@ -96389,27 +299520,31 @@ ], "instructions": [ [ - "SSHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshr_n_u16", + "name": "vset_lane_u8", "arguments": [ - "uint16x4_t a", - "const int n" + "uint8_t a", + "uint8x8_t v", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 16 + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vd.8B" } }, "Architectures": [ @@ -96419,27 +299554,31 @@ ], "instructions": [ [ - "USHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshr_n_u32", + "name": "vsetq_lane_f16", "arguments": [ - "uint32x2_t a", - "const int n" + "float16_t a", + "float16x8_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "float16x8_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "VnH" }, - "n": { - "minimum": 1, - "maximum": 32 + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vd.8H" } }, "Architectures": [ @@ -96449,27 +299588,31 @@ ], "instructions": [ [ - "USHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshr_n_u64", + "name": "vsetq_lane_f32", "arguments": [ - "uint64x1_t a", - "const int n" + "float32_t a", + "float32x4_t v", + "const int lane" ], "return_type": { - "value": "uint64x1_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 64 + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vd.4S" } }, "Architectures": [ @@ -96479,305 +299622,334 @@ ], "instructions": [ [ - "USHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshr_n_u8", + "name": "vsetq_lane_f64", "arguments": [ - "uint8x8_t a", - "const int n" + "float64_t a", + "float64x2_t v", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 8 + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vd.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrd_n_s64", + "name": "vsetq_lane_p16", "arguments": [ - "int64_t a", - "const int n" + "poly16_t a", + "poly16x8_t v", + "const int lane" ], "return_type": { - "value": "int64_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 64 + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrd_n_u64", + "name": "vsetq_lane_p64", "arguments": 
[ - "uint64_t a", - "const int n" + "poly64_t a", + "poly64x2_t v", + "const int lane" ], "return_type": { - "value": "uint64_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 64 + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vd.2D" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "USHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_high_n_s16", + "name": "vsetq_lane_p8", "arguments": [ - "int8x8_t r", - "int16x8_t a", - "const int n" + "poly8_t a", + "poly8x16_t v", + "const int lane" ], "return_type": { - "value": "int8x16_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 8 + "lane": { + "minimum": 0, + "maximum": 15 }, - "r": { - "register": "Vd.8B" + "v": { + "register": "Vd.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHRN2" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_high_n_s32", + "name": "vsetq_lane_s16", "arguments": [ - "int16x4_t r", - "int32x4_t a", - "const int n" + "int16_t a", + "int16x8_t v", + "const int lane" ], "return_type": { "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 16 + "lane": { + "minimum": 0, + "maximum": 7 }, - "r": { - "register": "Vd.4H" + "v": { + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHRN2" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_high_n_s64", + "name": "vsetq_lane_s32", "arguments": [ - "int32x2_t r", - "int64x2_t a", - "const int n" + "int32_t a", + "int32x4_t v", + "const int lane" ], "return_type": { "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 32 + "lane": { + "minimum": 0, + "maximum": 3 }, - "r": 
{ - "register": "Vd.2S" + "v": { + "register": "Vd.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHRN2" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_high_n_u16", + "name": "vsetq_lane_s64", "arguments": [ - "uint8x8_t r", - "uint16x8_t a", - "const int n" + "int64_t a", + "int64x2_t v", + "const int lane" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 8 + "lane": { + "minimum": 0, + "maximum": 1 }, - "r": { - "register": "Vd.8B" + "v": { + "register": "Vd.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHRN2" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_high_n_u32", + "name": "vsetq_lane_s8", "arguments": [ - "uint16x4_t r", - "uint32x4_t a", - "const int n" + "int8_t a", + "int8x16_t v", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 16 + "lane": { + "minimum": 0, + "maximum": 15 }, - "r": { - "register": "Vd.4H" + "v": { + "register": "Vd.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHRN2" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_high_n_u64", + "name": "vsetq_lane_u16", "arguments": [ - "uint32x2_t r", - "uint64x2_t a", - "const int n" + "uint16_t a", + "uint16x8_t v", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 32 + "lane": { + "minimum": 0, + "maximum": 7 }, - "r": { - "register": "Vd.2S" + "v": { + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHRN2" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_n_s16", + "name": "vsetq_lane_u32", 
"arguments": [ - "int16x8_t a", - "const int n" + "uint32_t a", + "uint32x4_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 8 + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vd.4S" } }, "Architectures": [ @@ -96787,27 +299959,31 @@ ], "instructions": [ [ - "SHRN" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_n_s32", + "name": "vsetq_lane_u64", "arguments": [ - "int32x4_t a", - "const int n" + "uint64_t a", + "uint64x2_t v", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 16 + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vd.2D" } }, "Architectures": [ @@ -96817,27 +299993,31 @@ ], "instructions": [ [ - "SHRN" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_n_s64", + "name": "vsetq_lane_u8", "arguments": [ - "int64x2_t a", - "const int n" + "uint8_t a", + "uint8x16_t v", + "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 32 + "lane": { + "minimum": 0, + "maximum": 15 + }, + "v": { + "register": "Vd.16B" } }, "Architectures": [ @@ -96847,447 +300027,439 @@ ], "instructions": [ [ - "SHRN" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_n_u16", + "name": "vsha1cq_u32", "arguments": [ - "uint16x8_t a", - "const int n" + "uint32x4_t hash_abcd", + "uint32_t hash_e", + "uint32x4_t wk" ], "return_type": { - "value": "uint8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "hash_abcd": { + "register": "Qd" }, - "n": { - "minimum": 1, - "maximum": 8 + "hash_e": { + "register": "Sn" + }, + "wk": { + 
"register": "Vm.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SHRN" + "SHA1C" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_n_u32", + "name": "vsha1h_u32", "arguments": [ - "uint32x4_t a", - "const int n" + "uint32_t hash_e" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 16 + "hash_e": { + "register": "Sn" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SHRN" + "SHA1H" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_n_u64", + "name": "vsha1mq_u32", "arguments": [ - "uint64x2_t a", - "const int n" + "uint32x4_t hash_abcd", + "uint32_t hash_e", + "uint32x4_t wk" ], "return_type": { - "value": "uint32x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "hash_abcd": { + "register": "Qd" }, - "n": { - "minimum": 1, - "maximum": 32 + "hash_e": { + "register": "Sn" + }, + "wk": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SHRN" + "SHA1M" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrq_n_s16", + "name": "vsha1pq_u32", "arguments": [ - "int16x8_t a", - "const int n" + "uint32x4_t hash_abcd", + "uint32_t hash_e", + "uint32x4_t wk" ], "return_type": { - "value": "int16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "hash_abcd": { + "register": "Qd" }, - "n": { - "minimum": 1, - "maximum": 16 + "hash_e": { + "register": "Sn" + }, + "wk": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SSHR" + "SHA1P" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrq_n_s32", + "name": "vsha1su0q_u32", "arguments": [ - "int32x4_t a", - "const int n" + "uint32x4_t w0_3", + "uint32x4_t w4_7", + "uint32x4_t w8_11" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { + "w0_3": { + "register": "Vd.4S" + }, 
+ "w4_7": { "register": "Vn.4S" }, - "n": { - "minimum": 1, - "maximum": 32 + "w8_11": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SSHR" + "SHA1SU0" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrq_n_s64", + "name": "vsha1su1q_u32", "arguments": [ - "int64x2_t a", - "const int n" + "uint32x4_t tw0_3", + "uint32x4_t w12_15" ], "return_type": { - "value": "int64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "tw0_3": { + "register": "Vd.4S" }, - "n": { - "minimum": 1, - "maximum": 64 + "w12_15": { + "register": "Vn.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SSHR" + "SHA1SU1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrq_n_s8", + "name": "vsha256h2q_u32", "arguments": [ - "int8x16_t a", - "const int n" + "uint32x4_t hash_efgh", + "uint32x4_t hash_abcd", + "uint32x4_t wk" ], "return_type": { - "value": "int8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "hash_abcd": { + "register": "Qn" }, - "n": { - "minimum": 1, - "maximum": 8 + "hash_efgh": { + "register": "Qd" + }, + "wk": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SSHR" + "SHA256H2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrq_n_u16", + "name": "vsha256hq_u32", "arguments": [ - "uint16x8_t a", - "const int n" + "uint32x4_t hash_abcd", + "uint32x4_t hash_efgh", + "uint32x4_t wk" ], "return_type": { - "value": "uint16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "hash_abcd": { + "register": "Qd" }, - "n": { - "minimum": 1, - "maximum": 16 + "hash_efgh": { + "register": "Qn" + }, + "wk": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "USHR" + "SHA256H" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrq_n_u32", + "name": "vsha256su0q_u32", "arguments": [ - "uint32x4_t a", - "const int n" + "uint32x4_t w0_3", 
+ "uint32x4_t w4_7" ], "return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "w0_3": { + "register": "Vd.4S" }, - "n": { - "minimum": 1, - "maximum": 32 + "w4_7": { + "register": "Vn.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "USHR" + "SHA256SU0" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrq_n_u64", + "name": "vsha256su1q_u32", "arguments": [ - "uint64x2_t a", - "const int n" + "uint32x4_t tw0_3", + "uint32x4_t w8_11", + "uint32x4_t w12_15" ], "return_type": { - "value": "uint64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "tw0_3": { + "register": "Vd.4S" }, - "n": { - "minimum": 1, - "maximum": 64 + "w12_15": { + "register": "Vm.4S" + }, + "w8_11": { + "register": "Vn.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "USHR" + "SHA256SU1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrq_n_u8", + "name": "vsha512h2q_u64", "arguments": [ - "uint8x16_t a", - "const int n" + "uint64x2_t sum_ab", + "uint64x2_t hash_c_", + "uint64x2_t hash_ab" ], "return_type": { - "value": "uint8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "hash_ab": { + "register": "Vm.2D" }, - "n": { - "minimum": 1, - "maximum": 8 + "hash_c_": { + "register": "Qn" + }, + "sum_ab": { + "register": "Qd" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USHR" + "SHA512H2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_p16", + "name": "vsha512hq_u64", "arguments": [ - "poly16x4_t a", - "poly16x4_t b", - "const int n" + "uint64x2_t hash_ed", + "uint64x2_t hash_gf", + "uint64x2_t kwh_kwh2" ], "return_type": { - "value": "poly16x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" + "hash_ed": { + "register": "Qd" }, - "b": { - "register": "Vn.4H" + "hash_gf": { + "register": "Qn" }, - "n": { - "minimum": 0, - "maximum": 15 + "kwh_kwh2": { + "register": 
"Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SLI" + "SHA512H" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_p64", + "name": "vsha512su0q_u64", "arguments": [ - "poly64x1_t a", - "poly64x1_t b", - "const int n" + "uint64x2_t w0_1", + "uint64x2_t w2_" ], "return_type": { - "value": "poly64x1_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Dd" - }, - "b": { - "register": "Dn" + "w0_1": { + "register": "Vd.2D" }, - "n": { - "minimum": 0, - "maximum": 63 + "w2_": { + "register": "Vn.2D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SLI" + "SHA512SU0" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_p8", + "name": "vsha512su1q_u64", "arguments": [ - "poly8x8_t a", - "poly8x8_t b", - "const int n" + "uint64x2_t s01_s02", + "uint64x2_t w14_15", + "uint64x2_t w9_10" ], "return_type": { - "value": "poly8x8_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "s01_s02": { + "register": "Vd.2D" }, - "b": { - "register": "Vn.8B" + "w14_15": { + "register": "Vn.2D" }, - "n": { - "minimum": 0, - "maximum": 7 + "w9_10": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SLI" + "SHA512SU1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_s16", + "name": "vshl_n_s16", "arguments": [ "int16x4_t a", - "int16x4_t b", "const int n" ], "return_type": { @@ -97295,9 +300467,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { "register": "Vn.4H" }, "n": { @@ -97312,16 +300481,15 @@ ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_s32", + "name": "vshl_n_s32", "arguments": [ "int32x2_t a", - "int32x2_t b", "const int n" ], "return_type": { @@ -97329,9 +300497,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { "register": "Vn.2S" }, "n": { @@ -97346,16 +300511,15 @@ ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - 
"name": "vsli_n_s64", + "name": "vshl_n_s64", "arguments": [ "int64x1_t a", - "int64x1_t b", "const int n" ], "return_type": { @@ -97363,9 +300527,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { "register": "Dn" }, "n": { @@ -97380,16 +300541,15 @@ ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_s8", + "name": "vshl_n_s8", "arguments": [ "int8x8_t a", - "int8x8_t b", "const int n" ], "return_type": { @@ -97397,9 +300557,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { "register": "Vn.8B" }, "n": { @@ -97414,16 +300571,15 @@ ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_u16", + "name": "vshl_n_u16", "arguments": [ "uint16x4_t a", - "uint16x4_t b", "const int n" ], "return_type": { @@ -97431,9 +300587,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { "register": "Vn.4H" }, "n": { @@ -97448,16 +300601,15 @@ ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_u32", + "name": "vshl_n_u32", "arguments": [ "uint32x2_t a", - "uint32x2_t b", "const int n" ], "return_type": { @@ -97465,9 +300617,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { "register": "Vn.2S" }, "n": { @@ -97482,16 +300631,15 @@ ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_u64", + "name": "vshl_n_u64", "arguments": [ "uint64x1_t a", - "uint64x1_t b", "const int n" ], "return_type": { @@ -97499,9 +300647,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { "register": "Dn" }, "n": { @@ -97516,16 +300661,15 @@ ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_u8", + "name": "vshl_n_u8", "arguments": [ "uint8x8_t a", - "uint8x8_t b", "const int n" ], "return_type": { @@ -97533,9 +300677,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { "register": "Vn.8B" }, 
"n": { @@ -97550,95 +300691,84 @@ ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vslid_n_s64", + "name": "vshl_s16", "arguments": [ - "int64_t a", - "int64_t b", - "const int n" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int64_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.4H" }, "b": { - "register": "Dn" - }, - "n": { - "minimum": 0, - "maximum": 63 + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SLI" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vslid_n_u64", + "name": "vshl_s32", "arguments": [ - "uint64_t a", - "uint64_t b", - "const int n" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "uint64_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.2S" }, "b": { - "register": "Dn" - }, - "n": { - "minimum": 0, - "maximum": 63 + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SLI" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_p16", + "name": "vshl_s64", "arguments": [ - "poly16x8_t a", - "poly16x8_t b", - "const int n" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Dn" }, "b": { - "register": "Vn.8H" - }, - "n": { - "minimum": 0, - "maximum": 15 + "register": "Dm" } }, "Architectures": [ @@ -97648,64 +300778,55 @@ ], "instructions": [ [ - "SLI" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_p64", + "name": "vshl_s8", "arguments": [ - "poly64x2_t a", - "poly64x2_t b", - "const int n" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "poly64x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8B" }, "b": { - "register": "Vn.2D" - }, - "n": { - "minimum": 0, - "maximum": 63 + "register": "Vm.8B" } 
}, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "SLI" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_p8", + "name": "vshl_u16", "arguments": [ - "poly8x16_t a", - "poly8x16_t b", - "const int n" + "uint16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vn.4H" }, "b": { - "register": "Vn.16B" - }, - "n": { - "minimum": 0, - "maximum": 7 + "register": "Vm.4H" } }, "Architectures": [ @@ -97715,31 +300836,26 @@ ], "instructions": [ [ - "SLI" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_s16", + "name": "vshl_u32", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "const int n" + "uint32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vn.2S" }, "b": { - "register": "Vn.8H" - }, - "n": { - "minimum": 0, - "maximum": 15 + "register": "Vm.2S" } }, "Architectures": [ @@ -97749,31 +300865,26 @@ ], "instructions": [ [ - "SLI" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_s32", + "name": "vshl_u64", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "const int n" + "uint64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Dn" }, "b": { - "register": "Vn.4S" - }, - "n": { - "minimum": 0, - "maximum": 31 + "register": "Dm" } }, "Architectures": [ @@ -97783,31 +300894,26 @@ ], "instructions": [ [ - "SLI" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_s64", + "name": "vshl_u8", "arguments": [ - "int64x2_t a", - "int64x2_t b", - "const int n" + "uint8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8B" }, "b": { - "register": "Vn.2D" - }, - "n": { - "minimum": 
0, - "maximum": 63 + "register": "Vm.8B" } }, "Architectures": [ @@ -97817,282 +300923,221 @@ ], "instructions": [ [ - "SLI" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_s8", + "name": "vshld_n_s64", "arguments": [ - "int8x16_t a", - "int8x16_t b", + "int64_t a", "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.16B" + "register": "Dn" }, "n": { "minimum": 0, - "maximum": 7 + "maximum": 63 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_u16", + "name": "vshld_n_u64", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", + "uint64_t a", "const int n" ], "return_type": { - "value": "uint16x8_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" + "register": "Dn" }, "n": { "minimum": 0, - "maximum": 15 + "maximum": 63 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_u32", + "name": "vshld_s64", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "const int n" + "int64_t a", + "int64_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Dn" }, "b": { - "register": "Vn.4S" - }, - "n": { - "minimum": 0, - "maximum": 31 + "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SLI" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_u64", + "name": "vshld_u64", "arguments": [ - "uint64x2_t a", - "uint64x2_t b", - "const int n" + "uint64_t a", + "int64_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Dn" }, "b": { - "register": "Vn.2D" - }, - "n": { - "minimum": 0, - "maximum": 63 + 
"register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SLI" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_u8", + "name": "vshll_high_n_s16", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", + "int16x8_t a", "const int n" ], "return_type": { - "value": "uint8x16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.16B" + "register": "Vn.8H" }, "n": { "minimum": 0, - "maximum": 7 + "maximum": 16 } }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "SLI" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vsm3partw1q_u32", - "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" - ], - "return_type": { - "value": "uint32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - }, - "b": {}, - "c": {} - }, "Architectures": [ "A64" ], "instructions": [ [ - "SM3PARTW1" + "SSHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsm3partw2q_u32", + "name": "vshll_high_n_s32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" + "int32x4_t a", + "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.4S" }, - "b": {}, - "c": {} + "n": { + "minimum": 0, + "maximum": 32 + } }, "Architectures": [ "A64" ], "instructions": [ [ - "SM3PARTW2" + "SSHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsm3ss1q_u32", + "name": "vshll_high_n_s8", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" + "int8x16_t a", + "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.16B" }, - "b": {}, - "c": {} + "n": { + "minimum": 0, + "maximum": 8 + } }, "Architectures": [ "A64" ], "instructions": [ [ - "SM3SS1" + "SSHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsm3tt1aq_u32", + "name": "vshll_high_n_u16", 
"arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c", - "const int imm2" + "uint16x8_t a", + "const int n" ], "return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.8H" }, - "b": {}, - "c": {}, - "imm2": { + "n": { "minimum": 0, - "maximum": 3 + "maximum": 16 } }, "Architectures": [ @@ -98100,31 +301145,27 @@ ], "instructions": [ [ - "SM3TT1A" + "USHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsm3tt1bq_u32", + "name": "vshll_high_n_u32", "arguments": [ "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c", - "const int imm2" + "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.4S" }, - "b": {}, - "c": {}, - "imm2": { + "n": { "minimum": 0, - "maximum": 3 + "maximum": 32 } }, "Architectures": [ @@ -98132,31 +301173,27 @@ ], "instructions": [ [ - "SM3TT1B" + "USHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsm3tt2aq_u32", + "name": "vshll_high_n_u8", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c", - "const int imm2" + "uint8x16_t a", + "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.16B" }, - "b": {}, - "c": {}, - "imm2": { + "n": { "minimum": 0, - "maximum": 3 + "maximum": 8 } }, "Architectures": [ @@ -98164,584 +301201,667 @@ ], "instructions": [ [ - "SM3TT2A" + "USHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsm3tt2bq_u32", + "name": "vshll_n_s16", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c", - "const int imm2" + "int16x4_t a", + "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.4H" }, - "b": {}, - "c": {}, - "imm2": { + "n": { "minimum": 0, - "maximum": 3 + "maximum": 16 } }, "Architectures": [ + "v7", + "A32", "A64" ], 
"instructions": [ [ - "SM3TT2B" + "SSHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsm4ekeyq_u32", + "name": "vshll_n_s32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int32x2_t a", + "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" }, - "b": {} + "n": { + "minimum": 0, + "maximum": 32 + } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SM4EKEY" + "SSHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsm4eq_u32", + "name": "vshll_n_s8", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int8x8_t a", + "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.8B" }, - "b": {} + "n": { + "minimum": 0, + "maximum": 8 + } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SM4E" + "SSHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqadd_u16", + "name": "vshll_n_u16", "arguments": [ "uint16x4_t a", - "int16x4_t b" + "const int n" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { "register": "Vn.4H" + }, + "n": { + "minimum": 0, + "maximum": 16 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "USHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqadd_u32", + "name": "vshll_n_u32", "arguments": [ "uint32x2_t a", - "int32x2_t b" + "const int n" ], "return_type": { - "value": "uint32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { "register": "Vn.2S" + }, + "n": { + "minimum": 0, + "maximum": 32 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "USHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqadd_u64", + "name": "vshll_n_u8", "arguments": [ - "uint64x1_t a", - "int64x1_t b" + "uint8x8_t a", + "const int n" ], 
"return_type": { - "value": "uint64x1_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.8B" }, - "b": { - "register": "Dn" + "n": { + "minimum": 0, + "maximum": 8 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "USHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqadd_u8", + "name": "vshlq_n_s16", "arguments": [ - "uint8x8_t a", - "int8x8_t b" + "int16x8_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.8H" }, - "b": { - "register": "Vn.8B" + "n": { + "minimum": 0, + "maximum": 15 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqaddb_u8", + "name": "vshlq_n_s32", "arguments": [ - "uint8_t a", - "int8_t b" + "int32x4_t a", + "const int n" ], "return_type": { - "value": "uint8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Bd" + "register": "Vn.4S" }, - "b": { - "register": "Bn" + "n": { + "minimum": 0, + "maximum": 31 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqaddd_u64", + "name": "vshlq_n_s64", "arguments": [ - "uint64_t a", - "int64_t b" + "int64x2_t a", + "const int n" ], "return_type": { - "value": "uint64_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.2D" }, - "b": { - "register": "Dn" + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqaddh_u16", + "name": "vshlq_n_s8", "arguments": [ - "uint16_t a", - "int16_t b" + "int8x16_t a", + "const int n" ], "return_type": { - "value": "uint16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Hd" + "register": "Vn.16B" }, - 
"b": { - "register": "Hn" + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqaddq_u16", + "name": "vshlq_n_u16", "arguments": [ "uint16x8_t a", - "int16x8_t b" + "const int n" ], "return_type": { "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { "register": "Vn.8H" + }, + "n": { + "minimum": 0, + "maximum": 15 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqaddq_u32", + "name": "vshlq_n_u32", "arguments": [ "uint32x4_t a", - "int32x4_t b" + "const int n" ], "return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4S" + }, + "n": { + "minimum": 0, + "maximum": 31 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqaddq_u64", + "name": "vshlq_n_u64", "arguments": [ "uint64x2_t a", - "int64x2_t b" + "const int n" ], "return_type": { "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": "Vn.2D" + }, + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqaddq_u8", + "name": "vshlq_n_u8", "arguments": [ "uint8x16_t a", - "int8x16_t b" + "const int n" ], "return_type": { "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "b": { "register": "Vn.16B" + }, + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqadds_u32", + "name": "vshlq_s16", "arguments": [ - "uint32_t a", - "int32_t b" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "uint32_t" 
+ "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.8H" }, "b": { - "register": "Sn" + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqrt_f16", + "name": "vshlq_s32", "arguments": [ - "float16x4_t a" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FSQRT" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqrt_f32", + "name": "vshlq_s64", "arguments": [ - "float32x2_t a" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FSQRT" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqrt_f64", + "name": "vshlq_s8", "arguments": [ - "float64x1_t a" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "float64x1_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FSQRT" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqrth_f16", + "name": "vshlq_u16", "arguments": [ - "float16_t a" + "uint16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "float16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FSQRT" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqrtq_f16", + "name": "vshlq_u32", "arguments": [ - "float16x8_t 
a" + "uint32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FSQRT" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqrtq_f32", + "name": "vshlq_u64", "arguments": [ - "float32x4_t a" + "uint64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "float32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FSQRT" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqrtq_f64", + "name": "vshlq_u8", "arguments": [ - "float64x2_t a" + "uint8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "float64x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FSQRT" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsra_n_s16", + "name": "vshr_n_s16", "arguments": [ "int16x4_t a", - "int16x4_t b", "const int n" ], "return_type": { @@ -98749,9 +301869,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { "register": "Vn.4H" }, "n": { @@ -98766,16 +301883,15 @@ ], "instructions": [ [ - "SSRA" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsra_n_s32", + "name": "vshr_n_s32", "arguments": [ "int32x2_t a", - "int32x2_t b", "const int n" ], "return_type": { @@ -98783,9 +301899,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { "register": "Vn.2S" }, "n": { @@ -98800,16 +301913,15 @@ ], "instructions": [ [ - "SSRA" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsra_n_s64", + "name": "vshr_n_s64", "arguments": [ "int64x1_t a", - "int64x1_t b", "const int n" ], 
"return_type": { @@ -98817,9 +301929,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { "register": "Dn" }, "n": { @@ -98834,16 +301943,15 @@ ], "instructions": [ [ - "SSRA" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsra_n_s8", + "name": "vshr_n_s8", "arguments": [ "int8x8_t a", - "int8x8_t b", "const int n" ], "return_type": { @@ -98851,9 +301959,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { "register": "Vn.8B" }, "n": { @@ -98868,16 +301973,15 @@ ], "instructions": [ [ - "SSRA" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsra_n_u16", + "name": "vshr_n_u16", "arguments": [ "uint16x4_t a", - "uint16x4_t b", "const int n" ], "return_type": { @@ -98885,9 +301989,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { "register": "Vn.4H" }, "n": { @@ -98902,16 +302003,15 @@ ], "instructions": [ [ - "USRA" + "USHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsra_n_u32", + "name": "vshr_n_u32", "arguments": [ "uint32x2_t a", - "uint32x2_t b", "const int n" ], "return_type": { @@ -98919,9 +302019,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { "register": "Vn.2S" }, "n": { @@ -98936,16 +302033,15 @@ ], "instructions": [ [ - "USRA" + "USHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsra_n_u64", + "name": "vshr_n_u64", "arguments": [ "uint64x1_t a", - "uint64x1_t b", "const int n" ], "return_type": { @@ -98953,9 +302049,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { "register": "Dn" }, "n": { @@ -98970,16 +302063,15 @@ ], "instructions": [ [ - "USRA" + "USHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsra_n_u8", + "name": "vshr_n_u8", "arguments": [ "uint8x8_t a", - "uint8x8_t b", "const int n" ], "return_type": { @@ -98987,9 +302079,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { "register": "Vn.8B" }, "n": { @@ -99004,16 +302093,15 @@ ], "instructions": [ [ - "USRA" + "USHR" ] ] }, { "SIMD_ISA": "Neon", 
- "name": "vsrad_n_s64", + "name": "vshrd_n_s64", "arguments": [ "int64_t a", - "int64_t b", "const int n" ], "return_type": { @@ -99021,9 +302109,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { "register": "Dn" }, "n": { @@ -99036,16 +302121,15 @@ ], "instructions": [ [ - "SSRA" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsrad_n_u64", + "name": "vshrd_n_u64", "arguments": [ "uint64_t a", - "uint64_t b", "const int n" ], "return_type": { @@ -99053,9 +302137,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { "register": "Dn" }, "n": { @@ -99068,152 +302149,144 @@ ], "instructions": [ [ - "USRA" + "USHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsraq_n_s16", + "name": "vshrn_high_n_s16", "arguments": [ + "int8x8_t r", "int16x8_t a", - "int16x8_t b", "const int n" ], "return_type": { - "value": "int16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { "register": "Vn.8H" }, "n": { "minimum": 1, - "maximum": 16 + "maximum": 8 + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSRA" + "SHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsraq_n_s32", + "name": "vshrn_high_n_s32", "arguments": [ + "int16x4_t r", "int32x4_t a", - "int32x4_t b", "const int n" ], "return_type": { - "value": "int32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4S" }, "n": { "minimum": 1, - "maximum": 32 + "maximum": 16 + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSRA" + "SHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsraq_n_s64", + "name": "vshrn_high_n_s64", "arguments": [ + "int32x2_t r", "int64x2_t a", - "int64x2_t b", "const int n" ], "return_type": { - "value": "int64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": "Vn.2D" }, "n": { 
"minimum": 1, - "maximum": 64 + "maximum": 32 + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSRA" + "SHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsraq_n_s8", + "name": "vshrn_high_n_u16", "arguments": [ - "int8x16_t a", - "int8x16_t b", + "uint8x8_t r", + "uint16x8_t a", "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.16B" + "register": "Vn.8H" }, "n": { "minimum": 1, "maximum": 8 + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSRA" + "SHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsraq_n_u16", + "name": "vshrn_high_n_u32", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", + "uint16x4_t r", + "uint32x4_t a", "const int n" ], "return_type": { @@ -99221,33 +302294,31 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" + "register": "Vn.4S" }, "n": { "minimum": 1, "maximum": 16 + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USRA" + "SHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsraq_n_u32", + "name": "vshrn_high_n_u64", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", + "uint32x2_t r", + "uint64x2_t a", "const int n" ], "return_type": { @@ -99255,48 +302326,42 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" + "register": "Vn.2D" }, "n": { "minimum": 1, "maximum": 32 + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USRA" + "SHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsraq_n_u64", + "name": "vshrn_n_s16", "arguments": [ - "uint64x2_t a", - "uint64x2_t b", + "int16x8_t a", "const int n" ], "return_type": { - "value": "uint64x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - 
"register": "Vd.2D" - }, - "b": { - "register": "Vn.2D" + "register": "Vn.8H" }, "n": { "minimum": 1, - "maximum": 64 + "maximum": 8 } }, "Architectures": [ @@ -99306,31 +302371,27 @@ ], "instructions": [ [ - "USRA" + "SHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsraq_n_u8", + "name": "vshrn_n_s32", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", + "int32x4_t a", "const int n" ], "return_type": { - "value": "uint8x16_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.16B" + "register": "Vn.4S" }, "n": { "minimum": 1, - "maximum": 8 + "maximum": 16 } }, "Architectures": [ @@ -99340,31 +302401,27 @@ ], "instructions": [ [ - "USRA" + "SHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_p16", + "name": "vshrn_n_s64", "arguments": [ - "poly16x4_t a", - "poly16x4_t b", + "int64x2_t a", "const int n" ], "return_type": { - "value": "poly16x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" + "register": "Vn.2D" }, "n": { "minimum": 1, - "maximum": 16 + "maximum": 32 } }, "Architectures": [ @@ -99374,64 +302431,57 @@ ], "instructions": [ [ - "SRI" + "SHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_p64", + "name": "vshrn_n_u16", "arguments": [ - "poly64x1_t a", - "poly64x1_t b", + "uint16x8_t a", "const int n" ], "return_type": { - "value": "poly64x1_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { - "register": "Dn" + "register": "Vn.8H" }, "n": { "minimum": 1, - "maximum": 64 + "maximum": 8 } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "SRI" + "SHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_p8", + "name": "vshrn_n_u32", "arguments": [ - "poly8x8_t a", - "poly8x8_t b", + "uint32x4_t a", "const int n" ], "return_type": { - "value": "poly8x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { - "register": 
"Vn.8B" + "register": "Vn.4S" }, "n": { "minimum": 1, - "maximum": 8 + "maximum": 16 } }, "Architectures": [ @@ -99441,31 +302491,27 @@ ], "instructions": [ [ - "SRI" + "SHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_s16", + "name": "vshrn_n_u64", "arguments": [ - "int16x4_t a", - "int16x4_t b", + "uint64x2_t a", "const int n" ], "return_type": { - "value": "int16x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" + "register": "Vn.2D" }, "n": { "minimum": 1, - "maximum": 16 + "maximum": 32 } }, "Architectures": [ @@ -99475,31 +302521,27 @@ ], "instructions": [ [ - "SRI" + "SHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_s32", + "name": "vshrq_n_s16", "arguments": [ - "int32x2_t a", - "int32x2_t b", + "int16x8_t a", "const int n" ], "return_type": { - "value": "int32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" + "register": "Vn.8H" }, "n": { "minimum": 1, - "maximum": 32 + "maximum": 16 } }, "Architectures": [ @@ -99509,31 +302551,27 @@ ], "instructions": [ [ - "SRI" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_s64", + "name": "vshrq_n_s32", "arguments": [ - "int64x1_t a", - "int64x1_t b", + "int32x4_t a", "const int n" ], "return_type": { - "value": "int64x1_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { - "register": "Dn" + "register": "Vn.4S" }, "n": { "minimum": 1, - "maximum": 64 + "maximum": 32 } }, "Architectures": [ @@ -99543,31 +302581,27 @@ ], "instructions": [ [ - "SRI" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_s8", + "name": "vshrq_n_s64", "arguments": [ - "int8x8_t a", - "int8x8_t b", + "int64x2_t a", "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.8B" + "register": "Vn.2D" }, "n": { "minimum": 1, - 
"maximum": 8 + "maximum": 64 } }, "Architectures": [ @@ -99577,31 +302611,27 @@ ], "instructions": [ [ - "SRI" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_u16", + "name": "vshrq_n_s8", "arguments": [ - "uint16x4_t a", - "uint16x4_t b", + "int8x16_t a", "const int n" ], "return_type": { - "value": "uint16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" + "register": "Vn.16B" }, "n": { "minimum": 1, - "maximum": 16 + "maximum": 8 } }, "Architectures": [ @@ -99611,31 +302641,27 @@ ], "instructions": [ [ - "SRI" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_u32", + "name": "vshrq_n_u16", "arguments": [ - "uint32x2_t a", - "uint32x2_t b", + "uint16x8_t a", "const int n" ], "return_type": { - "value": "uint32x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" + "register": "Vn.8H" }, "n": { "minimum": 1, - "maximum": 32 + "maximum": 16 } }, "Architectures": [ @@ -99645,31 +302671,27 @@ ], "instructions": [ [ - "SRI" + "USHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_u64", + "name": "vshrq_n_u32", "arguments": [ - "uint64x1_t a", - "uint64x1_t b", + "uint32x4_t a", "const int n" ], "return_type": { - "value": "uint64x1_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { - "register": "Dn" + "register": "Vn.4S" }, "n": { "minimum": 1, - "maximum": 64 + "maximum": 32 } }, "Architectures": [ @@ -99679,31 +302701,27 @@ ], "instructions": [ [ - "SRI" + "USHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_u8", + "name": "vshrq_n_u64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b", + "uint64x2_t a", "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.8B" + "register": "Vn.2D" }, "n": { "minimum": 1, - "maximum": 8 + "maximum": 64 } }, 
"Architectures": [ @@ -99713,162 +302731,162 @@ ], "instructions": [ [ - "SRI" + "USHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsrid_n_s64", + "name": "vshrq_n_u8", "arguments": [ - "int64_t a", - "int64_t b", + "uint8x16_t a", "const int n" ], "return_type": { - "value": "int64_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { - "register": "Dn" + "register": "Vn.16B" }, "n": { "minimum": 1, - "maximum": 64 + "maximum": 8 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SRI" + "USHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsrid_n_u64", + "name": "vsli_n_p16", "arguments": [ - "uint64_t a", - "uint64_t b", + "poly16x4_t a", + "poly16x4_t b", "const int n" ], "return_type": { - "value": "uint64_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vd.4H" }, "b": { - "register": "Dn" + "register": "Vn.4H" }, "n": { - "minimum": 1, - "maximum": 64 + "minimum": 0, + "maximum": 15 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_p16", + "name": "vsli_n_p64", "arguments": [ - "poly16x8_t a", - "poly16x8_t b", + "poly64x1_t a", + "poly64x1_t b", "const int n" ], "return_type": { - "value": "poly16x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Dd" }, "b": { - "register": "Vn.8H" + "register": "Dn" }, "n": { - "minimum": 1, - "maximum": 16 + "minimum": 0, + "maximum": 63 } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_p64", + "name": "vsli_n_p8", "arguments": [ - "poly64x2_t a", - "poly64x2_t b", + "poly8x8_t a", + "poly8x8_t b", "const int n" ], "return_type": { - "value": "poly64x2_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8B" }, "b": { - "register": "Vn.2D" + "register": 
"Vn.8B" }, "n": { - "minimum": 1, - "maximum": 64 + "minimum": 0, + "maximum": 7 } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_p8", + "name": "vsli_n_s16", "arguments": [ - "poly8x16_t a", - "poly8x16_t b", + "int16x4_t a", + "int16x4_t b", "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4H" }, "b": { - "register": "Vn.16B" + "register": "Vn.4H" }, "n": { - "minimum": 1, - "maximum": 8 + "minimum": 0, + "maximum": 15 } }, "Architectures": [ @@ -99878,31 +302896,31 @@ ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_s16", + "name": "vsli_n_s32", "arguments": [ - "int16x8_t a", - "int16x8_t b", + "int32x2_t a", + "int32x2_t b", "const int n" ], "return_type": { - "value": "int16x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2S" }, "b": { - "register": "Vn.8H" + "register": "Vn.2S" }, "n": { - "minimum": 1, - "maximum": 16 + "minimum": 0, + "maximum": 31 } }, "Architectures": [ @@ -99912,31 +302930,31 @@ ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_s32", + "name": "vsli_n_s64", "arguments": [ - "int32x4_t a", - "int32x4_t b", + "int64x1_t a", + "int64x1_t b", "const int n" ], "return_type": { - "value": "int32x4_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Dd" }, "b": { - "register": "Vn.4S" + "register": "Dn" }, "n": { - "minimum": 1, - "maximum": 32 + "minimum": 0, + "maximum": 63 } }, "Architectures": [ @@ -99946,31 +302964,31 @@ ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_s64", + "name": "vsli_n_s8", "arguments": [ - "int64x2_t a", - "int64x2_t b", + "int8x8_t a", + "int8x8_t b", "const int n" ], "return_type": { - "value": "int64x2_t" + 
"value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8B" }, "b": { - "register": "Vn.2D" + "register": "Vn.8B" }, "n": { - "minimum": 1, - "maximum": 64 + "minimum": 0, + "maximum": 7 } }, "Architectures": [ @@ -99980,31 +302998,31 @@ ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_s8", + "name": "vsli_n_u16", "arguments": [ - "int8x16_t a", - "int8x16_t b", + "uint16x4_t a", + "uint16x4_t b", "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4H" }, "b": { - "register": "Vn.16B" + "register": "Vn.4H" }, "n": { - "minimum": 1, - "maximum": 8 + "minimum": 0, + "maximum": 15 } }, "Architectures": [ @@ -100014,31 +303032,31 @@ ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_u16", + "name": "vsli_n_u32", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", + "uint32x2_t a", + "uint32x2_t b", "const int n" ], "return_type": { - "value": "uint16x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2S" }, "b": { - "register": "Vn.8H" + "register": "Vn.2S" }, "n": { - "minimum": 1, - "maximum": 16 + "minimum": 0, + "maximum": 31 } }, "Architectures": [ @@ -100048,31 +303066,31 @@ ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_u32", + "name": "vsli_n_u64", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", + "uint64x1_t a", + "uint64x1_t b", "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Dd" }, "b": { - "register": "Vn.4S" + "register": "Dn" }, "n": { - "minimum": 1, - "maximum": 32 + "minimum": 0, + "maximum": 63 } }, "Architectures": [ @@ -100082,31 +303100,31 @@ ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vsriq_n_u64", + "name": "vsli_n_u8", "arguments": [ - "uint64x2_t a", - "uint64x2_t b", + "uint8x8_t a", + "uint8x8_t b", "const int n" ], "return_type": { - "value": "uint64x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8B" }, "b": { - "register": "Vn.2D" + "register": "Vn.8B" }, "n": { - "minimum": 1, - "maximum": 64 + "minimum": 0, + "maximum": 7 } }, "Architectures": [ @@ -100116,89 +303134,95 @@ ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_u8", + "name": "vslid_n_s64", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", + "int64_t a", + "int64_t b", "const int n" ], "return_type": { - "value": "uint8x16_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Dd" }, "b": { - "register": "Vn.16B" + "register": "Dn" }, "n": { - "minimum": 1, - "maximum": 8 + "minimum": 0, + "maximum": 63 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f16", + "name": "vslid_n_u64", "arguments": [ - "float16_t * ptr", - "float16x4_t val" + "uint64_t a", + "uint64_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint64_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt.4H" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f16_x2", + "name": "vsliq_n_p16", "arguments": [ - "float16_t * ptr", - "float16x4x2_t val" + "poly16x8_t a", + "poly16x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt2.4H" + "b": { + "register": "Vn.8H" + }, + "n": { + "minimum": 0, + 
"maximum": 15 } }, "Architectures": [ @@ -100208,55 +303232,64 @@ ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f16_x3", + "name": "vsliq_n_p64", "arguments": [ - "float16_t * ptr", - "float16x4x3_t val" + "poly64x2_t a", + "poly64x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "poly64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2D" }, - "val": { - "register": "Vt3.4H" + "b": { + "register": "Vn.2D" + }, + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f16_x4", + "name": "vsliq_n_p8", "arguments": [ - "float16_t * ptr", - "float16x4x4_t val" + "poly8x16_t a", + "poly8x16_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt4.4H" + "b": { + "register": "Vn.16B" + }, + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ @@ -100266,26 +303299,31 @@ ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f32", + "name": "vsliq_n_s16", "arguments": [ - "float32_t * ptr", - "float32x2_t val" + "int16x8_t a", + "int16x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt.2S" + "b": { + "register": "Vn.8H" + }, + "n": { + "minimum": 0, + "maximum": 15 } }, "Architectures": [ @@ -100295,26 +303333,31 @@ ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f32_x2", + "name": "vsliq_n_s32", "arguments": [ - "float32_t * ptr", - "float32x2x2_t val" + "int32x4_t a", + "int32x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int32x4_t" }, 
"Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4S" }, - "val": { - "register": "Vt2.2S" + "b": { + "register": "Vn.4S" + }, + "n": { + "minimum": 0, + "maximum": 31 } }, "Architectures": [ @@ -100324,26 +303367,31 @@ ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f32_x3", + "name": "vsliq_n_s64", "arguments": [ - "float32_t * ptr", - "float32x2x3_t val" + "int64x2_t a", + "int64x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2D" }, - "val": { - "register": "Vt3.2S" + "b": { + "register": "Vn.2D" + }, + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ @@ -100353,26 +303401,31 @@ ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f32_x4", + "name": "vsliq_n_s8", "arguments": [ - "float32_t * ptr", - "float32x2x4_t val" + "int8x16_t a", + "int8x16_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt4.2S" + "b": { + "register": "Vn.16B" + }, + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ @@ -100382,207 +303435,228 @@ ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f64", + "name": "vsliq_n_u16", "arguments": [ - "float64_t * ptr", - "float64x1_t val" + "uint16x8_t a", + "uint16x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt.1D" + "b": { + "register": "Vn.8H" + }, + "n": { + "minimum": 0, + "maximum": 15 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f64_x2", + "name": "vsliq_n_u32", 
"arguments": [ - "float64_t * ptr", - "float64x1x2_t val" + "uint32x4_t a", + "uint32x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4S" }, - "val": { - "register": "Vt2.1D" + "b": { + "register": "Vn.4S" + }, + "n": { + "minimum": 0, + "maximum": 31 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f64_x3", + "name": "vsliq_n_u64", "arguments": [ - "float64_t * ptr", - "float64x1x3_t val" + "uint64x2_t a", + "uint64x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2D" }, - "val": { - "register": "Vt3.1D" + "b": { + "register": "Vn.2D" + }, + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f64_x4", + "name": "vsliq_n_u8", "arguments": [ - "float64_t * ptr", - "float64x1x4_t val" + "uint8x16_t a", + "uint8x16_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt4.1D" + "b": { + "register": "Vn.16B" + }, + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_f16", + "name": "vsm3partw1q_u32", "arguments": [ - "float16_t * ptr", - "float16x4_t val", - "const int lane" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "a": { + "register": "Vd.4S" }, - "ptr": { - "register": "Xn" + "b": { + "register": "Vn.4S" }, - 
"val": { - "register": "Vt.4H" + "c": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SM3PARTW1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_f32", + "name": "vsm3partw2q_u32", "arguments": [ - "float32_t * ptr", - "float32x2_t val", - "const int lane" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vd.4S" }, - "ptr": { - "register": "Xn" + "b": { + "register": "Vn.4S" }, - "val": { - "register": "Vt.2S" + "c": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SM3PARTW2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_f64", + "name": "vsm3ss1q_u32", "arguments": [ - "float64_t * ptr", - "float64x1_t val", - "const int lane" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "a": { + "register": "Vn.4S" }, - "ptr": { - "register": "Xn" + "b": { + "register": "Vm.4S" }, - "val": { - "register": "Vt.1D" + "c": { + "register": "Va.4S" } }, "Architectures": [ @@ -100590,801 +303664,715 @@ ], "instructions": [ [ - "ST1" + "SM3SS1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_p16", + "name": "vsm3tt1aq_u32", "arguments": [ - "poly16_t * ptr", - "poly16x4_t val", - "const int lane" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c", + "const int imm2" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "a": { + "register": "Vd.4S" }, - "ptr": { - "register": "Xn" + "b": { + "register": "Vn.4S" }, - "val": { - "register": "Vt.4H" + "c": { + "register": "Vm.4S" + }, + "imm2": { + "minimum": 0, + "maximum": 3 } }, "Architectures": [ - "v7", - "A32", "A64" ], 
"instructions": [ [ - "ST1" + "SM3TT1A" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_p64", + "name": "vsm3tt1bq_u32", "arguments": [ - "poly64_t * ptr", - "poly64x1_t val", - "const int lane" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c", + "const int imm2" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "a": { + "register": "Vd.4S" }, - "ptr": { - "register": "Xn" + "b": { + "register": "Vn.4S" }, - "val": { - "register": "Vt.1D" + "c": { + "register": "Vm.4S" + }, + "imm2": { + "minimum": 0, + "maximum": 3 } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "ST1" + "SM3TT1B" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_p8", + "name": "vsm3tt2aq_u32", "arguments": [ - "poly8_t * ptr", - "poly8x8_t val", - "const int lane" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c", + "const int imm2" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "a": { + "register": "Vd.4S" }, - "ptr": { - "register": "Xn" + "b": { + "register": "Vn.4S" }, - "val": { - "register": "Vt.8B" + "c": { + "register": "Vm.4S" + }, + "imm2": { + "minimum": 0, + "maximum": 3 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SM3TT2A" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_s16", + "name": "vsm3tt2bq_u32", "arguments": [ - "int16_t * ptr", - "int16x4_t val", - "const int lane" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c", + "const int imm2" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "a": { + "register": "Vd.4S" }, - "ptr": { - "register": "Xn" + "b": { + "register": "Vn.4S" }, - "val": { - "register": "Vt.4H" + "c": { + "register": "Vm.4S" + }, + "imm2": { + "minimum": 0, + "maximum": 3 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ 
[ - "ST1" + "SM3TT2B" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_s32", + "name": "vsm4ekeyq_u32", "arguments": [ - "int32_t * ptr", - "int32x2_t val", - "const int lane" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" + "a": { + "register": "Vn.4S" }, - "val": { - "register": "Vt.2S" + "b": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SM4EKEY" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_s64", + "name": "vsm4eq_u32", "arguments": [ - "int64_t * ptr", - "int64x1_t val", - "const int lane" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4S" }, - "val": { - "register": "Vt.1D" + "b": { + "register": "Vn.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SM4E" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_s8", + "name": "vsqadd_u16", "arguments": [ - "int8_t * ptr", - "int8x8_t val", - "const int lane" + "uint16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "void" + "value": "uint16x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4H" }, - "val": { - "register": "Vt.8B" + "b": { + "register": "Vn.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_u16", + "name": "vsqadd_u32", "arguments": [ - "uint16_t * ptr", - "uint16x4_t val", - "const int lane" + "uint32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "void" + "value": "uint32x2_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, - "ptr": { - 
"register": "Xn" + "a": { + "register": "Vd.2S" }, - "val": { - "register": "Vt.4H" + "b": { + "register": "Vn.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_u32", + "name": "vsqadd_u64", "arguments": [ - "uint32_t * ptr", - "uint32x2_t val", - "const int lane" + "uint64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "void" + "value": "uint64x1_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt.2S" + "b": { + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_u64", + "name": "vsqadd_u8", "arguments": [ - "uint64_t * ptr", - "uint64x1_t val", - "const int lane" + "uint8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "void" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8B" }, - "val": { - "register": "Vt.1D" + "b": { + "register": "Vn.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_u8", + "name": "vsqaddb_u8", "arguments": [ - "uint8_t * ptr", - "uint8x8_t val", - "const int lane" + "uint8_t a", + "int8_t b" ], "return_type": { - "value": "void" + "value": "uint8_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, - "ptr": { - "register": "Xn" + "a": { + "register": "Bd" }, - "val": { - "register": "Vt.8B" + "b": { + "register": "Bn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p16", + "name": "vsqaddd_u64", "arguments": [ - "poly16_t * ptr", - "poly16x4_t val" + "uint64_t a", + "int64_t b" ], "return_type": { - 
"value": "void" + "value": "uint64_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt.4H" + "b": { + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p16_x2", + "name": "vsqaddh_u16", "arguments": [ - "poly16_t * ptr", - "poly16x4x2_t val" + "uint16_t a", + "int16_t b" ], "return_type": { - "value": "void" + "value": "uint16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Hd" }, - "val": { - "register": "Vt2.4H" + "b": { + "register": "Hn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p16_x3", + "name": "vsqaddq_u16", "arguments": [ - "poly16_t * ptr", - "poly16x4x3_t val" + "uint16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "void" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt3.4H" + "b": { + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p16_x4", + "name": "vsqaddq_u32", "arguments": [ - "poly16_t * ptr", - "poly16x4x4_t val" + "uint32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4S" }, - "val": { - "register": "Vt4.4H" + "b": { + "register": "Vn.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p64", + "name": "vsqaddq_u64", "arguments": [ - "poly64_t * ptr", - "poly64x1_t val" + "uint64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "void" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { 
+ "register": "Vd.2D" }, - "val": { - "register": "Vt.1D" + "b": { + "register": "Vn.2D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p64_x2", + "name": "vsqaddq_u8", "arguments": [ - "poly64_t * ptr", - "poly64x1x2_t val" + "uint8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "void" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt2.1D" + "b": { + "register": "Vn.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p64_x3", + "name": "vsqadds_u32", "arguments": [ - "poly64_t * ptr", - "poly64x1x3_t val" + "uint32_t a", + "int32_t b" ], "return_type": { - "value": "void" + "value": "uint32_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Sd" }, - "val": { - "register": "Vt3.1D" + "b": { + "register": "Sn" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p64_x4", + "name": "vsqrt_f16", "arguments": [ - "poly64_t * ptr", - "poly64x1x4_t val" + "float16x4_t a" ], "return_type": { - "value": "void" + "value": "float16x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" - }, - "val": { - "register": "Vt4.1D" + "a": { + "register": "Vn.4H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "ST1" + "FSQRT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p8", + "name": "vsqrt_f32", "arguments": [ - "poly8_t * ptr", - "poly8x8_t val" + "float32x2_t a" ], "return_type": { - "value": "void" + "value": "float32x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" - }, - "val": { - "register": "Vt.8B" + "a": { + "register": "Vn.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "FSQRT" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vst1_p8_x2", + "name": "vsqrt_f64", "arguments": [ - "poly8_t * ptr", - "poly8x8x2_t val" + "float64x1_t a" ], "return_type": { - "value": "void" + "value": "float64x1_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" - }, - "val": { - "register": "Vt2.8B" + "a": { + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "FSQRT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p8_x3", + "name": "vsqrth_f16", "arguments": [ - "poly8_t * ptr", - "poly8x8x3_t val" + "float16_t a" ], "return_type": { - "value": "void" + "value": "float16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" - }, - "val": { - "register": "Vt3.8B" + "a": { + "register": "Hn" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ST1" + "FSQRT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p8_x4", + "name": "vsqrtq_f16", "arguments": [ - "poly8_t * ptr", - "poly8x8x4_t val" + "float16x8_t a" ], "return_type": { - "value": "void" + "value": "float16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" - }, - "val": { - "register": "Vt4.8B" + "a": { + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "FSQRT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s16", + "name": "vsqrtq_f32", "arguments": [ - "int16_t * ptr", - "int16x4_t val" + "float32x4_t a" ], "return_type": { - "value": "void" + "value": "float32x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" - }, - "val": { - "register": "Vt.4H" + "a": { + "register": "Vn.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "FSQRT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s16_x2", + "name": "vsqrtq_f64", "arguments": [ - "int16_t * ptr", - "int16x4x2_t val" + "float64x2_t a" ], "return_type": { - "value": "void" + "value": "float64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" - }, - "val": { - "register": "Vt2.4H" + "a": { + "register": 
"Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "FSQRT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s16_x3", + "name": "vsra_n_s16", "arguments": [ - "int16_t * ptr", - "int16x4x3_t val" + "int16x4_t a", + "int16x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int16x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4H" }, - "val": { - "register": "Vt3.4H" + "b": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -101394,26 +304382,31 @@ ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s16_x4", + "name": "vsra_n_s32", "arguments": [ - "int16_t * ptr", - "int16x4x4_t val" + "int32x2_t a", + "int32x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int32x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2S" }, - "val": { - "register": "Vt4.4H" + "b": { + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -101423,26 +304416,31 @@ ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s32", + "name": "vsra_n_s64", "arguments": [ - "int32_t * ptr", - "int32x2_t val" + "int64x1_t a", + "int64x1_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int64x1_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt.2S" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -101452,26 +304450,31 @@ ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s32_x2", + "name": "vsra_n_s8", "arguments": [ - "int32_t * ptr", - "int32x2x2_t val" + "int8x8_t a", + "int8x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int8x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": 
"Xn" + "a": { + "register": "Vd.8B" }, - "val": { - "register": "Vt2.2S" + "b": { + "register": "Vn.8B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -101481,26 +304484,31 @@ ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s32_x3", + "name": "vsra_n_u16", "arguments": [ - "int32_t * ptr", - "int32x2x3_t val" + "uint16x4_t a", + "uint16x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint16x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4H" }, - "val": { - "register": "Vt3.2S" + "b": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -101510,26 +304518,31 @@ ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s32_x4", + "name": "vsra_n_u32", "arguments": [ - "int32_t * ptr", - "int32x2x4_t val" + "uint32x2_t a", + "uint32x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint32x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2S" }, - "val": { - "register": "Vt4.2S" + "b": { + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -101539,26 +304552,31 @@ ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s64", + "name": "vsra_n_u64", "arguments": [ - "int64_t * ptr", - "int64x1_t val" + "uint64x1_t a", + "uint64x1_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint64x1_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt.1D" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -101568,26 +304586,31 @@ ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s64_x2", + "name": "vsra_n_u8", "arguments": [ - "int64_t * ptr", - "int64x1x2_t val" + 
"uint8x8_t a", + "uint8x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8B" }, - "val": { - "register": "Vt2.1D" + "b": { + "register": "Vn.8B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -101597,84 +304620,95 @@ ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s64_x3", + "name": "vsrad_n_s64", "arguments": [ - "int64_t * ptr", - "int64x1x3_t val" + "int64_t a", + "int64_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int64_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt3.1D" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s64_x4", + "name": "vsrad_n_u64", "arguments": [ - "int64_t * ptr", - "int64x1x4_t val" + "uint64_t a", + "uint64_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint64_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt4.1D" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s8", + "name": "vsraq_n_s16", "arguments": [ - "int8_t * ptr", - "int8x8_t val" + "int16x8_t a", + "int16x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt.8B" + "b": { + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -101684,26 +304718,31 @@ ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { 
"SIMD_ISA": "Neon", - "name": "vst1_s8_x2", + "name": "vsraq_n_s32", "arguments": [ - "int8_t * ptr", - "int8x8x2_t val" + "int32x4_t a", + "int32x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int32x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4S" }, - "val": { - "register": "Vt2.8B" + "b": { + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -101713,26 +304752,31 @@ ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s8_x3", + "name": "vsraq_n_s64", "arguments": [ - "int8_t * ptr", - "int8x8x3_t val" + "int64x2_t a", + "int64x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2D" }, - "val": { - "register": "Vt3.8B" + "b": { + "register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -101742,26 +304786,31 @@ ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s8_x4", + "name": "vsraq_n_s8", "arguments": [ - "int8_t * ptr", - "int8x8x4_t val" + "int8x16_t a", + "int8x16_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt4.8B" + "b": { + "register": "Vn.16B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -101771,26 +304820,31 @@ ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u16", + "name": "vsraq_n_u16", "arguments": [ - "uint16_t * ptr", - "uint16x4_t val" + "uint16x8_t a", + "uint16x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt.4H" + "b": { + "register": 
"Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -101800,26 +304854,31 @@ ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u16_x2", + "name": "vsraq_n_u32", "arguments": [ - "uint16_t * ptr", - "uint16x4x2_t val" + "uint32x4_t a", + "uint32x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4S" }, - "val": { - "register": "Vt2.4H" + "b": { + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -101829,26 +304888,31 @@ ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u16_x3", + "name": "vsraq_n_u64", "arguments": [ - "uint16_t * ptr", - "uint16x4x3_t val" + "uint64x2_t a", + "uint64x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2D" }, - "val": { - "register": "Vt3.4H" + "b": { + "register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -101858,26 +304922,31 @@ ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u16_x4", + "name": "vsraq_n_u8", "arguments": [ - "uint16_t * ptr", - "uint16x4x4_t val" + "uint8x16_t a", + "uint8x16_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt4.4H" + "b": { + "register": "Vn.16B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -101887,26 +304956,31 @@ ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u32", + "name": "vsri_n_p16", "arguments": [ - "uint32_t * ptr", - "uint32x2_t val" + "poly16x4_t a", + "poly16x4_t b", + "const int n" ], "return_type": { - "value": 
"void" + "value": "poly16x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4H" }, - "val": { - "register": "Vt.2S" + "b": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -101916,55 +304990,64 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u32_x2", + "name": "vsri_n_p64", "arguments": [ - "uint32_t * ptr", - "uint32x2x2_t val" + "poly64x1_t a", + "poly64x1_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "poly64x1_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt2.2S" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u32_x3", + "name": "vsri_n_p8", "arguments": [ - "uint32_t * ptr", - "uint32x2x3_t val" + "poly8x8_t a", + "poly8x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8B" }, - "val": { - "register": "Vt3.2S" + "b": { + "register": "Vn.8B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -101974,26 +305057,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u32_x4", + "name": "vsri_n_s16", "arguments": [ - "uint32_t * ptr", - "uint32x2x4_t val" + "int16x4_t a", + "int16x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int16x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4H" }, - "val": { - "register": "Vt4.2S" + "b": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -102003,26 +305091,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u64", + "name": 
"vsri_n_s32", "arguments": [ - "uint64_t * ptr", - "uint64x1_t val" + "int32x2_t a", + "int32x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int32x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2S" }, - "val": { - "register": "Vt.1D" + "b": { + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -102032,26 +305125,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u64_x2", + "name": "vsri_n_s64", "arguments": [ - "uint64_t * ptr", - "uint64x1x2_t val" + "int64x1_t a", + "int64x1_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int64x1_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt2.1D" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -102061,26 +305159,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u64_x3", + "name": "vsri_n_s8", "arguments": [ - "uint64_t * ptr", - "uint64x1x3_t val" + "int8x8_t a", + "int8x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int8x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8B" }, - "val": { - "register": "Vt3.1D" + "b": { + "register": "Vn.8B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -102090,26 +305193,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u64_x4", + "name": "vsri_n_u16", "arguments": [ - "uint64_t * ptr", - "uint64x1x4_t val" + "uint16x4_t a", + "uint16x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint16x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4H" }, - "val": { - "register": "Vt4.1D" + "b": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 } 
}, "Architectures": [ @@ -102119,26 +305227,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u8", + "name": "vsri_n_u32", "arguments": [ - "uint8_t * ptr", - "uint8x8_t val" + "uint32x2_t a", + "uint32x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint32x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2S" }, - "val": { - "register": "Vt.8B" + "b": { + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -102148,26 +305261,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u8_x2", + "name": "vsri_n_u64", "arguments": [ - "uint8_t * ptr", - "uint8x8x2_t val" + "uint64x1_t a", + "uint64x1_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint64x1_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt2.8B" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -102177,26 +305295,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u8_x3", + "name": "vsri_n_u8", "arguments": [ - "uint8_t * ptr", - "uint8x8x3_t val" + "uint8x8_t a", + "uint8x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8B" }, - "val": { - "register": "Vt3.8B" + "b": { + "register": "Vn.8B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -102206,84 +305329,95 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u8_x4", + "name": "vsrid_n_s64", "arguments": [ - "uint8_t * ptr", - "uint8x8x4_t val" + "int64_t a", + "int64_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int64_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": 
{ + "register": "Dd" }, - "val": { - "register": "Vt4.8B" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f16", + "name": "vsrid_n_u64", "arguments": [ - "float16_t * ptr", - "float16x8_t val" + "uint64_t a", + "uint64_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint64_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt.8H" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f16_x2", + "name": "vsriq_n_p16", "arguments": [ - "float16_t * ptr", - "float16x8x2_t val" + "poly16x8_t a", + "poly16x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt2.8H" + "b": { + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -102293,55 +305427,64 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f16_x3", + "name": "vsriq_n_p64", "arguments": [ - "float16_t * ptr", - "float16x8x3_t val" + "poly64x2_t a", + "poly64x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "poly64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2D" }, - "val": { - "register": "Vt3.8H" + "b": { + "register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f16_x4", + "name": "vsriq_n_p8", "arguments": [ - "float16_t * ptr", - "float16x8x4_t val" + "poly8x16_t a", + 
"poly8x16_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt4.8H" + "b": { + "register": "Vn.16B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -102351,26 +305494,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f32", + "name": "vsriq_n_s16", "arguments": [ - "float32_t * ptr", - "float32x4_t val" + "int16x8_t a", + "int16x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt.4S" + "b": { + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -102380,26 +305528,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f32_x2", + "name": "vsriq_n_s32", "arguments": [ - "float32_t * ptr", - "float32x4x2_t val" + "int32x4_t a", + "int32x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int32x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4S" }, - "val": { - "register": "Vt2.4S" + "b": { + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -102409,26 +305562,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f32_x3", + "name": "vsriq_n_s64", "arguments": [ - "float32_t * ptr", - "float32x4x3_t val" + "int64x2_t a", + "int64x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2D" }, - "val": { - "register": "Vt3.4S" + "b": { + "register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -102438,26 +305596,31 @@ ], "instructions": 
[ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f32_x4", + "name": "vsriq_n_s8", "arguments": [ - "float32_t * ptr", - "float32x4x4_t val" + "int8x16_t a", + "int8x16_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt4.4S" + "b": { + "register": "Vn.16B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -102467,139 +305630,162 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f64", + "name": "vsriq_n_u16", "arguments": [ - "float64_t * ptr", - "float64x2_t val" + "uint16x8_t a", + "uint16x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt.2D" + "b": { + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f64_x2", + "name": "vsriq_n_u32", "arguments": [ - "float64_t * ptr", - "float64x2x2_t val" + "uint32x4_t a", + "uint32x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4S" }, - "val": { - "register": "Vt2.2D" + "b": { + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f64_x3", + "name": "vsriq_n_u64", "arguments": [ - "float64_t * ptr", - "float64x2x3_t val" + "uint64x2_t a", + "uint64x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2D" }, - 
"val": { - "register": "Vt3.2D" + "b": { + "register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f64_x4", + "name": "vsriq_n_u8", "arguments": [ - "float64_t * ptr", - "float64x2x4_t val" + "uint8x16_t a", + "uint8x16_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt4.2D" + "b": { + "register": "Vn.16B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_f16", + "name": "vst1_f16", "arguments": [ "float16_t * ptr", - "float16x8_t val", - "const int lane" + "float16x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.8H" + "register": "Vt.4H" } }, "Architectures": [ @@ -102615,25 +305801,23 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_f32", + "name": "vst1_f16_x2", "arguments": [ - "float32_t * ptr", - "float32x4_t val", - "const int lane" + "float16_t * ptr", + "float16x4x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.4S" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -102649,28 +305833,31 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_f64", + "name": "vst1_f16_x3", "arguments": [ - "float64_t * ptr", - "float64x2_t val", - "const int lane" + "float16_t * ptr", + "float16x4x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { 
"register": "Xn" }, - "val": { - "register": "Vt.2D" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -102681,25 +305868,29 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_p16", + "name": "vst1_f16_x4", "arguments": [ - "poly16_t * ptr", - "poly16x8_t val", - "const int lane" + "float16_t * ptr", + "float16x4x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ @@ -102715,28 +305906,24 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_p64", + "name": "vst1_f32", "arguments": [ - "poly64_t * ptr", - "poly64x2_t val", - "const int lane" + "float32_t * ptr", + "float32x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.2D" + "register": "Vt.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -102748,25 +305935,23 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_p8", + "name": "vst1_f32_x2", "arguments": [ - "poly8_t * ptr", - "poly8x16_t val", - "const int lane" + "float32_t * ptr", + "float32x2x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.16B" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ @@ -102782,25 +305967,26 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_s16", + "name": "vst1_f32_x3", "arguments": [ - "int16_t * ptr", - "int16x8_t val", - "const int 
lane" + "float32_t * ptr", + "float32x2x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.8H" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" } }, "Architectures": [ @@ -102816,25 +306002,29 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_s32", + "name": "vst1_f32_x4", "arguments": [ - "int32_t * ptr", - "int32x4_t val", - "const int lane" + "float32_t * ptr", + "float32x2x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ @@ -102850,30 +306040,23 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_s64", + "name": "vst1_f64", "arguments": [ - "int64_t * ptr", - "int64x2_t val", - "const int lane" + "float64_t * ptr", + "float64x1_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.2D" + "register": "Vt.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -102884,30 +306067,26 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_s8", + "name": "vst1_f64_x2", "arguments": [ - "int8_t * ptr", - "int8x16_t val", - "const int lane" + "float64_t * ptr", + "float64x1x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.16B" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], 
"instructions": [ @@ -102918,30 +306097,29 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_u16", + "name": "vst1_f64_x3", "arguments": [ - "uint16_t * ptr", - "uint16x8_t val", - "const int lane" + "float64_t * ptr", + "float64x1x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.8H" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -102952,30 +306130,32 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_u32", + "name": "vst1_f64_x4", "arguments": [ - "uint32_t * ptr", - "uint32x4_t val", - "const int lane" + "float64_t * ptr", + "float64x1x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.4S" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -102986,10 +306166,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_u64", + "name": "vst1_lane_f16", "arguments": [ - "uint64_t * ptr", - "uint64x2_t val", + "float16_t * ptr", + "float16x4_t val", "const int lane" ], "return_type": { @@ -102998,13 +306178,13 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 3 }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.2D" + "register": "Vt.4H" } }, "Architectures": [ @@ -103020,10 +306200,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_u8", + "name": "vst1_lane_f32", "arguments": [ - "uint8_t * ptr", - "uint8x16_t val", + "float32_t * ptr", + "float32x2_t val", "const int lane" ], "return_type": { @@ -103032,13 +306212,13 @@ 
"Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 15 + "maximum": 1 }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.16B" + "register": "Vt.2S" } }, "Architectures": [ @@ -103054,25 +306234,28 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p16", + "name": "vst1_lane_f64", "arguments": [ - "poly16_t * ptr", - "poly16x8_t val" + "float64_t * ptr", + "float64x1_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.8H" + "register": "Vt.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -103083,20 +306266,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p16_x2", + "name": "vst1_lane_p16", "arguments": [ "poly16_t * ptr", - "poly16x8x2_t val" + "poly16x4_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.4H" } }, "Architectures": [ @@ -103112,24 +306300,28 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p16_x3", + "name": "vst1_lane_p64", "arguments": [ - "poly16_t * ptr", - "poly16x8x3_t val" + "poly64_t * ptr", + "poly64x1_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt3.8H" + "register": "Vt.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -103141,20 +306333,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p16_x4", + "name": "vst1_lane_p8", "arguments": [ - "poly16_t * ptr", - "poly16x8x4_t val" + "poly8_t * ptr", + "poly8x8_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt4.8H" + "register": "Vt.8B" } }, "Architectures": [ 
@@ -103170,23 +306367,29 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p64", + "name": "vst1_lane_s16", "arguments": [ - "poly64_t * ptr", - "poly64x2_t val" + "int16_t * ptr", + "int16x4_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.2D" + "register": "Vt.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -103198,23 +306401,29 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p64_x2", + "name": "vst1_lane_s32", "arguments": [ - "poly64_t * ptr", - "poly64x2x2_t val" + "int32_t * ptr", + "int32x2_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.2D" + "register": "Vt.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -103226,20 +306435,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p64_x3", + "name": "vst1_lane_s64", "arguments": [ - "poly64_t * ptr", - "poly64x2x3_t val" + "int64_t * ptr", + "int64x1_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt3.2D" + "register": "Vt.1D" } }, "Architectures": [ @@ -103255,23 +306469,29 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p64_x4", + "name": "vst1_lane_s8", "arguments": [ - "poly64_t * ptr", - "poly64x2x4_t val" + "int8_t * ptr", + "int8x8_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt4.2D" + "register": "Vt.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -103283,20 +306503,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p8", + "name": "vst1_lane_u16", "arguments": [ - "poly8_t * ptr", - "poly8x16_t val" + "uint16_t * ptr", + 
"uint16x4_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.16B" + "register": "Vt.4H" } }, "Architectures": [ @@ -103312,20 +306537,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p8_x2", + "name": "vst1_lane_u32", "arguments": [ - "poly8_t * ptr", - "poly8x16x2_t val" + "uint32_t * ptr", + "uint32x2_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.16B" + "register": "Vt.2S" } }, "Architectures": [ @@ -103341,20 +306571,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p8_x3", + "name": "vst1_lane_u64", "arguments": [ - "poly8_t * ptr", - "poly8x16x3_t val" + "uint64_t * ptr", + "uint64x1_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt3.16B" + "register": "Vt.1D" } }, "Architectures": [ @@ -103370,20 +306605,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p8_x4", + "name": "vst1_lane_u8", "arguments": [ - "poly8_t * ptr", - "poly8x16x4_t val" + "uint8_t * ptr", + "uint8x8_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt4.16B" + "register": "Vt.8B" } }, "Architectures": [ @@ -103399,10 +306639,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s16", + "name": "vst1_mf8_x4", "arguments": [ - "int16_t * ptr", - "int16x8_t val" + "int8_t * ptr", + "int8x8x4_t val" ], "return_type": { "value": "void" @@ -103411,13 +306651,20 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.8H" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + 
"val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -103428,10 +306675,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s16_x2", + "name": "vst1_p16", "arguments": [ - "int16_t * ptr", - "int16x8x2_t val" + "poly16_t * ptr", + "poly16x4_t val" ], "return_type": { "value": "void" @@ -103441,7 +306688,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.4H" } }, "Architectures": [ @@ -103457,10 +306704,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s16_x3", + "name": "vst1_p16_x2", "arguments": [ - "int16_t * ptr", - "int16x8x3_t val" + "poly16_t * ptr", + "poly16x4x2_t val" ], "return_type": { "value": "void" @@ -103469,8 +306716,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -103486,10 +306736,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s16_x4", + "name": "vst1_p16_x3", "arguments": [ - "int16_t * ptr", - "int16x8x4_t val" + "poly16_t * ptr", + "poly16x4x3_t val" ], "return_type": { "value": "void" @@ -103498,8 +306748,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -103515,10 +306771,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s32", + "name": "vst1_p16_x4", "arguments": [ - "int32_t * ptr", - "int32x4_t val" + "poly16_t * ptr", + "poly16x4x4_t val" ], "return_type": { "value": "void" @@ -103527,8 +306783,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.4S" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ @@ -103544,10 +306809,10 @@ }, { "SIMD_ISA": "Neon", - 
"name": "vst1q_s32_x2", + "name": "vst1_p64", "arguments": [ - "int32_t * ptr", - "int32x4x2_t val" + "poly64_t * ptr", + "poly64x1_t val" ], "return_type": { "value": "void" @@ -103557,11 +306822,10 @@ "register": "Xn" }, "val": { - "register": "Vt2.4S" + "register": "Vt.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -103573,10 +306837,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s32_x3", + "name": "vst1_p64_x2", "arguments": [ - "int32_t * ptr", - "int32x4x3_t val" + "poly64_t * ptr", + "poly64x1x2_t val" ], "return_type": { "value": "void" @@ -103585,12 +306849,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4S" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -103602,10 +306868,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s32_x4", + "name": "vst1_p64_x3", "arguments": [ - "int32_t * ptr", - "int32x4x4_t val" + "poly64_t * ptr", + "poly64x1x3_t val" ], "return_type": { "value": "void" @@ -103614,12 +306880,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4S" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -103631,10 +306902,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s64", + "name": "vst1_p64_x4", "arguments": [ - "int64_t * ptr", - "int64x2_t val" + "poly64_t * ptr", + "poly64x1x4_t val" ], "return_type": { "value": "void" @@ -103643,12 +306914,20 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -103660,10 +306939,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s64_x2", + "name": "vst1_p8", "arguments": [ - "int64_t * ptr", - "int64x2x2_t val" + 
"poly8_t * ptr", + "poly8x8_t val" ], "return_type": { "value": "void" @@ -103673,7 +306952,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.2D" + "register": "Vt.8B" } }, "Architectures": [ @@ -103689,10 +306968,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s64_x3", + "name": "vst1_p8_x2", "arguments": [ - "int64_t * ptr", - "int64x2x3_t val" + "poly8_t * ptr", + "poly8x8x2_t val" ], "return_type": { "value": "void" @@ -103701,8 +306980,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -103718,10 +307000,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s64_x4", + "name": "vst1_p8_x3", "arguments": [ - "int64_t * ptr", - "int64x2x4_t val" + "poly8_t * ptr", + "poly8x8x3_t val" ], "return_type": { "value": "void" @@ -103730,8 +307012,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ @@ -103747,10 +307035,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s8", + "name": "vst1_p8_x4", "arguments": [ - "int8_t * ptr", - "int8x16_t val" + "poly8_t * ptr", + "poly8x8x4_t val" ], "return_type": { "value": "void" @@ -103759,8 +307047,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ @@ -103776,10 +307073,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s8_x2", + "name": "vst1_s16", "arguments": [ - "int8_t * ptr", - "int8x16x2_t val" + "int16_t * ptr", + "int16x4_t val" ], "return_type": { "value": "void" @@ -103789,7 +307086,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.16B" + "register": "Vt.4H" } }, "Architectures": [ @@ -103805,10 +307102,10 
@@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s8_x3", + "name": "vst1_s16_x2", "arguments": [ - "int8_t * ptr", - "int8x16x3_t val" + "int16_t * ptr", + "int16x4x2_t val" ], "return_type": { "value": "void" @@ -103817,8 +307114,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.16B" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -103834,10 +307134,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s8_x4", + "name": "vst1_s16_x3", "arguments": [ - "int8_t * ptr", - "int8x16x4_t val" + "int16_t * ptr", + "int16x4x3_t val" ], "return_type": { "value": "void" @@ -103846,8 +307146,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.16B" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -103863,10 +307169,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u16", + "name": "vst1_s16_x4", "arguments": [ - "uint16_t * ptr", - "uint16x8_t val" + "int16_t * ptr", + "int16x4x4_t val" ], "return_type": { "value": "void" @@ -103875,8 +307181,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ @@ -103892,10 +307207,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u16_x2", + "name": "vst1_s32", "arguments": [ - "uint16_t * ptr", - "uint16x8x2_t val" + "int32_t * ptr", + "int32x2_t val" ], "return_type": { "value": "void" @@ -103905,7 +307220,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.2S" } }, "Architectures": [ @@ -103921,10 +307236,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u16_x3", + "name": "vst1_s32_x2", "arguments": [ - "uint16_t * ptr", - "uint16x8x3_t val" + "int32_t * ptr", + "int32x2x2_t val" ], "return_type": { "value": "void" @@ 
-103933,8 +307248,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ @@ -103950,10 +307268,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u16_x4", + "name": "vst1_s32_x3", "arguments": [ - "uint16_t * ptr", - "uint16x8x4_t val" + "int32_t * ptr", + "int32x2x3_t val" ], "return_type": { "value": "void" @@ -103962,8 +307280,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" } }, "Architectures": [ @@ -103979,10 +307303,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u32", + "name": "vst1_s32_x4", "arguments": [ - "uint32_t * ptr", - "uint32x4_t val" + "int32_t * ptr", + "int32x2x4_t val" ], "return_type": { "value": "void" @@ -103991,8 +307315,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ @@ -104008,10 +307341,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u32_x2", + "name": "vst1_s64", "arguments": [ - "uint32_t * ptr", - "uint32x4x2_t val" + "int64_t * ptr", + "int64x1_t val" ], "return_type": { "value": "void" @@ -104021,7 +307354,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.4S" + "register": "Vt.1D" } }, "Architectures": [ @@ -104037,10 +307370,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u32_x3", + "name": "vst1_s64_x2", "arguments": [ - "uint32_t * ptr", - "uint32x4x3_t val" + "int64_t * ptr", + "int64x1x2_t val" ], "return_type": { "value": "void" @@ -104049,8 +307382,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4S" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ @@ 
-104066,10 +307402,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u32_x4", + "name": "vst1_s64_x3", "arguments": [ - "uint32_t * ptr", - "uint32x4x4_t val" + "int64_t * ptr", + "int64x1x3_t val" ], "return_type": { "value": "void" @@ -104078,8 +307414,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4S" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ @@ -104095,10 +307437,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u64", + "name": "vst1_s64_x4", "arguments": [ - "uint64_t * ptr", - "uint64x2_t val" + "int64_t * ptr", + "int64x1x4_t val" ], "return_type": { "value": "void" @@ -104107,8 +307449,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ @@ -104124,10 +307475,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u64_x2", + "name": "vst1_s8", "arguments": [ - "uint64_t * ptr", - "uint64x2x2_t val" + "int8_t * ptr", + "int8x8_t val" ], "return_type": { "value": "void" @@ -104137,7 +307488,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.2D" + "register": "Vt.8B" } }, "Architectures": [ @@ -104153,10 +307504,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u64_x3", + "name": "vst1_s8_x2", "arguments": [ - "uint64_t * ptr", - "uint64x2x3_t val" + "int8_t * ptr", + "int8x8x2_t val" ], "return_type": { "value": "void" @@ -104165,8 +307516,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -104182,10 +307536,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u64_x4", + "name": "vst1_s8_x3", "arguments": [ - "uint64_t * ptr", - "uint64x2x4_t val" + "int8_t * ptr", + "int8x8x3_t val" ], "return_type": { 
"value": "void" @@ -104194,8 +307548,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ @@ -104211,10 +307571,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u8", + "name": "vst1_s8_x4", "arguments": [ - "uint8_t * ptr", - "uint8x16_t val" + "int8_t * ptr", + "int8x8x4_t val" ], "return_type": { "value": "void" @@ -104223,8 +307583,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ @@ -104240,10 +307609,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u8_x2", + "name": "vst1_u16", "arguments": [ - "uint8_t * ptr", - "uint8x16x2_t val" + "uint16_t * ptr", + "uint16x4_t val" ], "return_type": { "value": "void" @@ -104253,7 +307622,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.16B" + "register": "Vt.4H" } }, "Architectures": [ @@ -104269,10 +307638,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u8_x3", + "name": "vst1_u16_x2", "arguments": [ - "uint8_t * ptr", - "uint8x16x3_t val" + "uint16_t * ptr", + "uint16x4x2_t val" ], "return_type": { "value": "void" @@ -104281,8 +307650,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.16B" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -104298,10 +307670,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u8_x4", + "name": "vst1_u16_x3", "arguments": [ - "uint8_t * ptr", - "uint8x16x4_t val" + "uint16_t * ptr", + "uint16x4x3_t val" ], "return_type": { "value": "void" @@ -104310,8 +307682,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.16B" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + 
"val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -104327,10 +307705,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst2_f16", + "name": "vst1_u16_x4", "arguments": [ - "float16_t * ptr", - "float16x4x2_t val" + "uint16_t * ptr", + "uint16x4x4_t val" ], "return_type": { "value": "void" @@ -104339,8 +307717,17 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ @@ -104350,16 +307737,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_f32", + "name": "vst1_u32", "arguments": [ - "float32_t * ptr", - "float32x2x2_t val" + "uint32_t * ptr", + "uint32x2_t val" ], "return_type": { "value": "void" @@ -104369,7 +307756,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.2S" + "register": "Vt.2S" } }, "Architectures": [ @@ -104379,16 +307766,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_f64", + "name": "vst1_u32_x2", "arguments": [ - "float64_t * ptr", - "float64x1x2_t val" + "uint32_t * ptr", + "uint32x2x2_t val" ], "return_type": { "value": "void" @@ -104397,11 +307784,16 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.1D" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -104412,25 +307804,26 @@ }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_f16", + "name": "vst1_u32_x3", "arguments": [ - "float16_t * ptr", - "float16x4x2_t val", - "const int lane" + "uint32_t * ptr", + "uint32x2x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.4H" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + 
"register": "Vt3.2S" } }, "Architectures": [ @@ -104440,31 +307833,35 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_f32", + "name": "vst1_u32_x4", "arguments": [ - "float32_t * ptr", - "float32x2x2_t val", - "const int lane" + "uint32_t * ptr", + "uint32x2x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ @@ -104474,63 +307871,58 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_f64", + "name": "vst1_u64", "arguments": [ - "float64_t * ptr", - "float64x1x2_t val", - "const int lane" + "uint64_t * ptr", + "uint64x1_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.1D" + "register": "Vt.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_p16", + "name": "vst1_u64_x2", "arguments": [ - "poly16_t * ptr", - "poly16x4x2_t val", - "const int lane" + "uint64_t * ptr", + "uint64x1x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.4H" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ @@ -104540,63 +307932,70 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_p64", + "name": "vst1_u64_x3", "arguments": [ - "poly64_t * ptr", - "poly64x1x2_t val", - "const int lane" + "uint64_t * ptr", + "uint64x1x3_t val" ], "return_type": { "value": "void" }, 
"Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_p8", + "name": "vst1_u64_x4", "arguments": [ - "poly8_t * ptr", - "poly8x8x2_t val", - "const int lane" + "uint64_t * ptr", + "uint64x1x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.8B" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ @@ -104606,31 +308005,26 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_s16", + "name": "vst1_u8", "arguments": [ - "int16_t * ptr", - "int16x4x2_t val", - "const int lane" + "uint8_t * ptr", + "uint8x8_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.4H" + "register": "Vt.8B" } }, "Architectures": [ @@ -104640,31 +308034,29 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_s32", + "name": "vst1_u8_x2", "arguments": [ - "int32_t * ptr", - "int32x2x2_t val", - "const int lane" + "uint8_t * ptr", + "uint8x8x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.2S" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -104674,63 +308066,70 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] 
}, { "SIMD_ISA": "Neon", - "name": "vst2_lane_s64", + "name": "vst1_u8_x3", "arguments": [ - "int64_t * ptr", - "int64x1x2_t val", - "const int lane" + "uint8_t * ptr", + "uint8x8x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.1D" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_s8", + "name": "vst1_u8_x4", "arguments": [ - "int8_t * ptr", - "int8x8x2_t val", - "const int lane" + "uint8_t * ptr", + "uint8x8x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ @@ -104740,31 +308139,26 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_u16", + "name": "vst1q_f16", "arguments": [ - "uint16_t * ptr", - "uint16x4x2_t val", - "const int lane" + "float16_t * ptr", + "float16x8_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.4H" + "register": "Vt.8H" } }, "Architectures": [ @@ -104774,31 +308168,29 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_u32", + "name": "vst1q_f16_x2", "arguments": [ - "uint32_t * ptr", - "uint32x2x2_t val", - "const int lane" + "float16_t * ptr", + "float16x8x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, 
"ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.2S" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -104808,63 +308200,70 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_u64", + "name": "vst1q_f16_x3", "arguments": [ - "uint64_t * ptr", - "uint64x1x2_t val", - "const int lane" + "float16_t * ptr", + "float16x8x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.1D" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_u8", + "name": "vst1q_f16_x4", "arguments": [ - "uint8_t * ptr", - "uint8x8x2_t val", - "const int lane" + "float16_t * ptr", + "float16x8x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.8B" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ @@ -104874,16 +308273,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_p16", + "name": "vst1q_f32", "arguments": [ - "poly16_t * ptr", - "poly16x4x2_t val" + "float32_t * ptr", + "float32x4_t val" ], "return_type": { "value": "void" @@ -104893,7 +308292,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.4H" + "register": "Vt.4S" } }, "Architectures": [ @@ -104903,16 +308302,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_p64", + "name": "vst1q_f32_x2", "arguments": [ - "poly64_t * ptr", - 
"poly64x1x2_t val" + "float32_t * ptr", + "float32x4x2_t val" ], "return_type": { "value": "void" @@ -104921,11 +308320,15 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.1D" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -104937,10 +308340,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst2_p8", + "name": "vst1q_f32_x3", "arguments": [ - "poly8_t * ptr", - "poly8x8x2_t val" + "float32_t * ptr", + "float32x4x3_t val" ], "return_type": { "value": "void" @@ -104949,8 +308352,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.8B" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" } }, "Architectures": [ @@ -104960,16 +308369,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_s16", + "name": "vst1q_f32_x4", "arguments": [ - "int16_t * ptr", - "int16x4x2_t val" + "float32_t * ptr", + "float32x4x4_t val" ], "return_type": { "value": "void" @@ -104978,8 +308387,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.4H" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { + "register": "Vt4.4S" } }, "Architectures": [ @@ -104989,16 +308407,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_s32", + "name": "vst1q_f64", "arguments": [ - "int32_t * ptr", - "int32x2x2_t val" + "float64_t * ptr", + "float64x2_t val" ], "return_type": { "value": "void" @@ -105008,26 +308426,24 @@ "register": "Xn" }, "val": { - "register": "Vt2.2S" + "register": "Vt.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_s64", + "name": "vst1q_f64_x2", "arguments": [ - "int64_t * ptr", - "int64x1x2_t val" + "float64_t * ptr", + "float64x2x2_t 
val" ], "return_type": { "value": "void" @@ -105036,13 +308452,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -105053,10 +308470,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst2_s8", + "name": "vst1q_f64_x3", "arguments": [ - "int8_t * ptr", - "int8x8x2_t val" + "float64_t * ptr", + "float64x2x3_t val" ], "return_type": { "value": "void" @@ -105065,27 +308482,31 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.8B" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_u16", + "name": "vst1q_f64_x4", "arguments": [ - "uint16_t * ptr", - "uint16x4x2_t val" + "float64_t * ptr", + "float64x2x4_t val" ], "return_type": { "value": "void" @@ -105094,37 +308515,49 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.4H" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_u32", + "name": "vst1q_lane_f16", "arguments": [ - "uint32_t * ptr", - "uint32x2x2_t val" + "float16_t * ptr", + "float16x8_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.2S" + "register": "Vt.8H" } }, "Architectures": [ @@ -105134,26 +308567,31 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_u64", + "name": "vst1q_lane_f32", "arguments": [ - "uint64_t * ptr", - 
"uint64x1x2_t val" + "float32_t * ptr", + "float32x4_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.1D" + "register": "Vt.4S" } }, "Architectures": [ @@ -105169,49 +308607,57 @@ }, { "SIMD_ISA": "Neon", - "name": "vst2_u8", + "name": "vst1q_lane_f64", "arguments": [ - "uint8_t * ptr", - "uint8x8x2_t val" + "float64_t * ptr", + "float64x2_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.8B" + "register": "Vt.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_f16", + "name": "vst1q_lane_p16", "arguments": [ - "float16_t * ptr", - "float16x8x2_t val" + "poly16_t * ptr", + "poly16x8_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.8H" } }, "Architectures": [ @@ -105221,72 +308667,83 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_f32", + "name": "vst1q_lane_p64", "arguments": [ - "float32_t * ptr", - "float32x4x2_t val" + "poly64_t * ptr", + "poly64x2_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.4S" + "register": "Vt.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_f64", + "name": "vst1q_lane_p8", "arguments": [ - "float64_t * ptr", - "float64x2x2_t val" + "poly8_t * ptr", + "poly8x16_t val", + "const int lane" ], "return_type": { "value": "void" }, 
"Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.2D" + "register": "Vt.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_f16", + "name": "vst1q_lane_s16", "arguments": [ - "float16_t * ptr", - "float16x8x2_t val", + "int16_t * ptr", + "int16x8_t val", "const int lane" ], "return_type": { @@ -105301,7 +308758,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.8H" } }, "Architectures": [ @@ -105311,16 +308768,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_f32", + "name": "vst1q_lane_s32", "arguments": [ - "float32_t * ptr", - "float32x4x2_t val", + "int32_t * ptr", + "int32x4_t val", "const int lane" ], "return_type": { @@ -105335,7 +308792,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.4S" + "register": "Vt.4S" } }, "Architectures": [ @@ -105345,16 +308802,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_f64", + "name": "vst1q_lane_s64", "arguments": [ - "float64_t * ptr", - "float64x2x2_t val", + "int64_t * ptr", + "int64x2_t val", "const int lane" ], "return_type": { @@ -105363,30 +308820,32 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 2 + "maximum": 1 }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.2D" + "register": "Vt.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_p16", + "name": "vst1q_lane_s8", "arguments": [ - "poly16_t * ptr", - "poly16x8x2_t val", + "int8_t * ptr", + "int8x16_t val", "const int lane" ], "return_type": { @@ -105395,13 +308854,13 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 15 }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.16B" } }, "Architectures": 
[ @@ -105411,16 +308870,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_p64", + "name": "vst1q_lane_u16", "arguments": [ - "poly64_t * ptr", - "poly64x2x2_t val", + "uint16_t * ptr", + "uint16x8_t val", "const int lane" ], "return_type": { @@ -105429,30 +308888,32 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 7 }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.2D" + "register": "Vt.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_p8", + "name": "vst1q_lane_u32", "arguments": [ - "poly8_t * ptr", - "poly8x16x2_t val", + "uint32_t * ptr", + "uint32x4_t val", "const int lane" ], "return_type": { @@ -105461,30 +308922,32 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 15 + "maximum": 3 }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.16B" + "register": "Vt.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_s16", + "name": "vst1q_lane_u64", "arguments": [ - "int16_t * ptr", - "int16x8x2_t val", + "uint64_t * ptr", + "uint64x2_t val", "const int lane" ], "return_type": { @@ -105493,13 +308956,13 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 1 }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.2D" } }, "Architectures": [ @@ -105509,16 +308972,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_s32", + "name": "vst1q_lane_u8", "arguments": [ - "int32_t * ptr", - "int32x4x2_t val", + "uint8_t * ptr", + "uint8x16_t val", "const int lane" ], "return_type": { @@ -105527,13 +308990,13 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 15 }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.4S" + "register": "Vt.16B" } }, 
"Architectures": [ @@ -105543,31 +309006,35 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_s64", + "name": "vst1q_mf8_x4", "arguments": [ - "int64_t * ptr", - "int64x2x2_t val", - "const int lane" + "int8_t * ptr", + "int8x16x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.2D" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { + "register": "Vt4.16B" } }, "Architectures": [ @@ -105575,62 +309042,57 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_s8", + "name": "vst1q_p16", "arguments": [ - "int8_t * ptr", - "int8x16x2_t val", - "const int lane" + "poly16_t * ptr", + "poly16x8_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.16B" + "register": "Vt.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_u16", + "name": "vst1q_p16_x2", "arguments": [ - "uint16_t * ptr", - "uint16x8x2_t val", - "const int lane" + "poly16_t * ptr", + "poly16x8x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { "register": "Vt2.8H" } }, @@ -105641,31 +309103,32 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_u32", + "name": "vst1q_p16_x3", "arguments": [ - "uint32_t * ptr", - "uint32x4x2_t val", - "const int lane" + "poly16_t * ptr", + "poly16x8x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - 
"minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.4S" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ @@ -105675,80 +309138,82 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_u64", + "name": "vst1q_p16_x4", "arguments": [ - "uint64_t * ptr", - "uint64x2x2_t val", - "const int lane" + "poly16_t * ptr", + "poly16x8x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.2D" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_u8", + "name": "vst1q_p64", "arguments": [ - "uint8_t * ptr", - "uint8x16x2_t val", - "const int lane" + "poly64_t * ptr", + "poly64x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.16B" + "register": "Vt.2D" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_p16", + "name": "vst1q_p64_x2", "arguments": [ - "poly16_t * ptr", - "poly16x8x2_t val" + "poly64_t * ptr", + "poly64x2x2_t val" ], "return_type": { "value": "void" @@ -105757,27 +309222,29 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.8H" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_p64", + "name": "vst1q_p64_x3", "arguments": [ 
"poly64_t * ptr", - "poly64x2x2_t val" + "poly64x2x3_t val" ], "return_type": { "value": "void" @@ -105786,25 +309253,33 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_p8", + "name": "vst1q_p64_x4", "arguments": [ - "poly8_t * ptr", - "poly8x16x2_t val" + "poly64_t * ptr", + "poly64x2x4_t val" ], "return_type": { "value": "void" @@ -105813,27 +309288,35 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.16B" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_s16", + "name": "vst1q_p8", "arguments": [ - "int16_t * ptr", - "int16x8x2_t val" + "poly8_t * ptr", + "poly8x16_t val" ], "return_type": { "value": "void" @@ -105843,7 +309326,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.16B" } }, "Architectures": [ @@ -105853,16 +309336,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_s32", + "name": "vst1q_p8_x2", "arguments": [ - "int32_t * ptr", - "int32x4x2_t val" + "poly8_t * ptr", + "poly8x16x2_t val" ], "return_type": { "value": "void" @@ -105871,8 +309354,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.4S" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ @@ -105882,16 +309368,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_s64", + "name": "vst1q_p8_x3", "arguments": [ - "int64_t * ptr", - "int64x2x2_t val" + "poly8_t * ptr", + "poly8x16x3_t val" ], 
"return_type": { "value": "void" @@ -105900,25 +309386,33 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.2D" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_s8", + "name": "vst1q_p8_x4", "arguments": [ - "int8_t * ptr", - "int8x16x2_t val" + "poly8_t * ptr", + "poly8x16x4_t val" ], "return_type": { "value": "void" @@ -105927,8 +309421,17 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { + "register": "Vt4.16B" } }, "Architectures": [ @@ -105938,16 +309441,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_u16", + "name": "vst1q_s16", "arguments": [ - "uint16_t * ptr", - "uint16x8x2_t val" + "int16_t * ptr", + "int16x8_t val" ], "return_type": { "value": "void" @@ -105957,7 +309460,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.8H" } }, "Architectures": [ @@ -105967,16 +309470,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_u32", + "name": "vst1q_s16_x2", "arguments": [ - "uint32_t * ptr", - "uint32x4x2_t val" + "int16_t * ptr", + "int16x8x2_t val" ], "return_type": { "value": "void" @@ -105985,8 +309488,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.4S" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -105996,16 +309502,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_u64", + "name": "vst1q_s16_x3", "arguments": [ - "uint64_t * ptr", - "uint64x2x2_t val" + "int16_t * ptr", + "int16x8x3_t val" ], "return_type": { "value": "void" @@ -106014,25 +309520,33 @@ 
"ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.2D" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_u8", + "name": "vst1q_s16_x4", "arguments": [ - "uint8_t * ptr", - "uint8x16x2_t val" + "int16_t * ptr", + "int16x8x4_t val" ], "return_type": { "value": "void" @@ -106041,8 +309555,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.16B" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ @@ -106052,16 +309575,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_f16", + "name": "vst1q_s32", "arguments": [ - "float16_t * ptr", - "float16x4x3_t val" + "int32_t * ptr", + "int32x4_t val" ], "return_type": { "value": "void" @@ -106071,7 +309594,7 @@ "register": "Xn" }, "val": { - "register": "Vt3.4H" + "register": "Vt.4S" } }, "Architectures": [ @@ -106081,16 +309604,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_f32", + "name": "vst1q_s32_x2", "arguments": [ - "float32_t * ptr", - "float32x2x3_t val" + "int32_t * ptr", + "int32x4x2_t val" ], "return_type": { "value": "void" @@ -106099,8 +309622,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2S" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ @@ -106110,16 +309636,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_f64", + "name": "vst1q_s32_x3", "arguments": [ - "float64_t * ptr", - "float64x1x3_t val" + "int32_t * ptr", + "int32x4x3_t val" ], "return_type": { "value": "void" @@ -106128,11 +309654,19 @@ "ptr": { "register": "Xn" }, - "val": { 
- "register": "Vt3.1D" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -106143,25 +309677,29 @@ }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_f16", + "name": "vst1q_s32_x4", "arguments": [ - "float16_t * ptr", - "float16x4x3_t val", - "const int lane" + "int32_t * ptr", + "int32x4x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4H" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { + "register": "Vt4.4S" } }, "Architectures": [ @@ -106171,31 +309709,26 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_f32", + "name": "vst1q_s64", "arguments": [ - "float32_t * ptr", - "float32x2x3_t val", - "const int lane" + "int64_t * ptr", + "int64x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt3.2S" + "register": "Vt.2D" } }, "Architectures": [ @@ -106205,63 +309738,64 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_f64", + "name": "vst1q_s64_x2", "arguments": [ - "float64_t * ptr", - "float64x1x3_t val", - "const int lane" + "int64_t * ptr", + "int64x2x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_p16", + "name": 
"vst1q_s64_x3", "arguments": [ - "poly16_t * ptr", - "poly16x4x3_t val", - "const int lane" + "int64_t * ptr", + "int64x2x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4H" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ @@ -106271,63 +309805,64 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_p64", + "name": "vst1q_s64_x4", "arguments": [ - "poly64_t * ptr", - "poly64x1x3_t val", - "const int lane" + "int64_t * ptr", + "int64x2x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_p8", + "name": "vst1q_s8", "arguments": [ - "poly8_t * ptr", - "poly8x8x3_t val", - "const int lane" + "int8_t * ptr", + "int8x16_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt3.8B" + "register": "Vt.16B" } }, "Architectures": [ @@ -106337,31 +309872,29 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_s16", + "name": "vst1q_s8_x2", "arguments": [ - "int16_t * ptr", - "int16x4x3_t val", - "const int lane" + "int8_t * ptr", + "int8x16x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - 
"register": "Vt3.4H" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ @@ -106371,31 +309904,32 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_s32", + "name": "vst1q_s8_x3", "arguments": [ - "int32_t * ptr", - "int32x2x3_t val", - "const int lane" + "int8_t * ptr", + "int8x16x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2S" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" } }, "Architectures": [ @@ -106405,63 +309939,64 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_s64", + "name": "vst1q_s8_x4", "arguments": [ - "int64_t * ptr", - "int64x1x3_t val", - "const int lane" + "int8_t * ptr", + "int8x16x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.1D" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { + "register": "Vt4.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_s8", + "name": "vst1q_u16", "arguments": [ - "int8_t * ptr", - "int8x8x3_t val", - "const int lane" + "uint16_t * ptr", + "uint16x8_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt3.8B" + "register": "Vt.8H" } }, "Architectures": [ @@ -106471,31 +310006,29 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_u16", + "name": "vst1q_u16_x2", 
"arguments": [ "uint16_t * ptr", - "uint16x4x3_t val", - "const int lane" + "uint16x8x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -106505,31 +310038,32 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_u32", + "name": "vst1q_u16_x3", "arguments": [ - "uint32_t * ptr", - "uint32x2x3_t val", - "const int lane" + "uint16_t * ptr", + "uint16x8x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2S" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ @@ -106539,63 +310073,64 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_u64", + "name": "vst1q_u16_x4", "arguments": [ - "uint64_t * ptr", - "uint64x1x3_t val", - "const int lane" + "uint16_t * ptr", + "uint16x8x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.1D" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_u8", + "name": "vst1q_u32", "arguments": [ - "uint8_t * ptr", - "uint8x8x3_t val", - "const int lane" + "uint32_t * ptr", + "uint32x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": 
{ "register": "Xn" }, "val": { - "register": "Vt3.8B" + "register": "Vt.4S" } }, "Architectures": [ @@ -106605,16 +310140,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_p16", + "name": "vst1q_u32_x2", "arguments": [ - "poly16_t * ptr", - "poly16x4x3_t val" + "uint32_t * ptr", + "uint32x4x2_t val" ], "return_type": { "value": "void" @@ -106623,8 +310158,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4H" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ @@ -106634,16 +310172,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_p64", + "name": "vst1q_u32_x3", "arguments": [ - "poly64_t * ptr", - "poly64x1x3_t val" + "uint32_t * ptr", + "uint32x4x3_t val" ], "return_type": { "value": "void" @@ -106652,11 +310190,18 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.1D" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -106668,10 +310213,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst3_p8", + "name": "vst1q_u32_x4", "arguments": [ - "poly8_t * ptr", - "poly8x8x3_t val" + "uint32_t * ptr", + "uint32x4x4_t val" ], "return_type": { "value": "void" @@ -106680,8 +310225,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8B" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { + "register": "Vt4.4S" } }, "Architectures": [ @@ -106691,16 +310245,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_s16", + "name": "vst1q_u64", "arguments": [ - "int16_t * ptr", - "int16x4x3_t val" + "uint64_t * ptr", + "uint64x2_t val" ], "return_type": { "value": "void" @@ -106710,7 +310264,7 @@ "register": "Xn" }, "val": { - "register": "Vt3.4H" + 
"register": "Vt.2D" } }, "Architectures": [ @@ -106720,16 +310274,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_s32", + "name": "vst1q_u64_x2", "arguments": [ - "int32_t * ptr", - "int32x2x3_t val" + "uint64_t * ptr", + "uint64x2x2_t val" ], "return_type": { "value": "void" @@ -106738,8 +310292,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2S" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ @@ -106749,16 +310306,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_s64", + "name": "vst1q_u64_x3", "arguments": [ - "int64_t * ptr", - "int64x1x3_t val" + "uint64_t * ptr", + "uint64x2x3_t val" ], "return_type": { "value": "void" @@ -106767,8 +310324,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ @@ -106784,10 +310347,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst3_s8", + "name": "vst1q_u64_x4", "arguments": [ - "int8_t * ptr", - "int8x8x3_t val" + "uint64_t * ptr", + "uint64x2x4_t val" ], "return_type": { "value": "void" @@ -106796,8 +310359,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8B" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ @@ -106807,16 +310379,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_u16", + "name": "vst1q_u8", "arguments": [ - "uint16_t * ptr", - "uint16x4x3_t val" + "uint8_t * ptr", + "uint8x16_t val" ], "return_type": { "value": "void" @@ -106826,7 +310398,7 @@ "register": "Xn" }, "val": { - "register": "Vt3.4H" + "register": "Vt.16B" } }, "Architectures": [ @@ -106836,16 +310408,16 @@ ], 
"instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_u32", + "name": "vst1q_u8_x2", "arguments": [ - "uint32_t * ptr", - "uint32x2x3_t val" + "uint8_t * ptr", + "uint8x16x2_t val" ], "return_type": { "value": "void" @@ -106854,8 +310426,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2S" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ @@ -106865,16 +310440,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_u64", + "name": "vst1q_u8_x3", "arguments": [ - "uint64_t * ptr", - "uint64x1x3_t val" + "uint8_t * ptr", + "uint8x16x3_t val" ], "return_type": { "value": "void" @@ -106883,8 +310458,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.1D" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" } }, "Architectures": [ @@ -106900,10 +310481,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst3_u8", + "name": "vst1q_u8_x4", "arguments": [ "uint8_t * ptr", - "uint8x8x3_t val" + "uint8x16x4_t val" ], "return_type": { "value": "void" @@ -106912,8 +310493,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8B" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { + "register": "Vt4.16B" } }, "Architectures": [ @@ -106923,16 +310513,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_f16", + "name": "vst2_f16", "arguments": [ "float16_t * ptr", - "float16x8x3_t val" + "float16x4x2_t val" ], "return_type": { "value": "void" @@ -106941,8 +310531,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -106952,16 +310545,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] 
}, { "SIMD_ISA": "Neon", - "name": "vst3q_f32", + "name": "vst2_f32", "arguments": [ "float32_t * ptr", - "float32x4x3_t val" + "float32x2x2_t val" ], "return_type": { "value": "void" @@ -106970,8 +310563,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ @@ -106981,16 +310577,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_f64", + "name": "vst2_f64", "arguments": [ "float64_t * ptr", - "float64x2x3_t val" + "float64x1x2_t val" ], "return_type": { "value": "void" @@ -106999,8 +310595,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ @@ -107008,16 +310607,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_f16", + "name": "vst2_lane_f16", "arguments": [ "float16_t * ptr", - "float16x8x3_t val", + "float16x4x2_t val", "const int lane" ], "return_type": { @@ -107026,13 +310625,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -107042,16 +310644,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_f32", + "name": "vst2_lane_f32", "arguments": [ "float32_t * ptr", - "float32x4x3_t val", + "float32x2x2_t val", "const int lane" ], "return_type": { @@ -107060,13 +310662,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ @@ -107076,16 +310681,16 @@ ], "instructions": [ [ - 
"ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_f64", + "name": "vst2_lane_f64", "arguments": [ "float64_t * ptr", - "float64x2x3_t val", + "float64x1x2_t val", "const int lane" ], "return_type": { @@ -107094,13 +310699,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 0 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ @@ -107108,16 +310716,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_p16", + "name": "vst2_lane_p16", "arguments": [ "poly16_t * ptr", - "poly16x8x3_t val", + "poly16x4x2_t val", "const int lane" ], "return_type": { @@ -107126,13 +310734,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -107142,16 +310753,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_p64", + "name": "vst2_lane_p64", "arguments": [ "poly64_t * ptr", - "poly64x2x3_t val", + "poly64x1x2_t val", "const int lane" ], "return_type": { @@ -107160,13 +310771,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 0 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ @@ -107174,16 +310788,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_p8", + "name": "vst2_lane_p8", "arguments": [ "poly8_t * ptr", - "poly8x16x3_t val", + "poly8x8x2_t val", "const int lane" ], "return_type": { @@ -107192,13 +310806,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 15 + "maximum": 7 }, "ptr": { "register": "Xn" 
}, - "val": { - "register": "Vt3.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -107208,16 +310825,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_s16", + "name": "vst2_lane_s16", "arguments": [ "int16_t * ptr", - "int16x8x3_t val", + "int16x4x2_t val", "const int lane" ], "return_type": { @@ -107226,13 +310843,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -107242,16 +310862,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_s32", + "name": "vst2_lane_s32", "arguments": [ "int32_t * ptr", - "int32x4x3_t val", + "int32x2x2_t val", "const int lane" ], "return_type": { @@ -107260,13 +310880,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ @@ -107276,16 +310899,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_s64", + "name": "vst2_lane_s64", "arguments": [ "int64_t * ptr", - "int64x2x3_t val", + "int64x1x2_t val", "const int lane" ], "return_type": { @@ -107294,13 +310917,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 0 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ @@ -107308,16 +310934,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_s8", + "name": "vst2_lane_s8", "arguments": [ "int8_t * ptr", - "int8x16x3_t val", + 
"int8x8x2_t val", "const int lane" ], "return_type": { @@ -107326,13 +310952,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 15 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -107342,16 +310971,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_u16", + "name": "vst2_lane_u16", "arguments": [ "uint16_t * ptr", - "uint16x8x3_t val", + "uint16x4x2_t val", "const int lane" ], "return_type": { @@ -107360,13 +310989,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -107376,16 +311008,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_u32", + "name": "vst2_lane_u32", "arguments": [ "uint32_t * ptr", - "uint32x4x3_t val", + "uint32x2x2_t val", "const int lane" ], "return_type": { @@ -107394,13 +311026,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ @@ -107410,16 +311045,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_u64", + "name": "vst2_lane_u64", "arguments": [ "uint64_t * ptr", - "uint64x2x3_t val", + "uint64x1x2_t val", "const int lane" ], "return_type": { @@ -107428,13 +311063,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 0 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ @@ 
-107442,16 +311080,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_u8", + "name": "vst2_lane_u8", "arguments": [ "uint8_t * ptr", - "uint8x16x3_t val", + "uint8x8x2_t val", "const int lane" ], "return_type": { @@ -107460,13 +311098,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 15 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -107476,16 +311117,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_p16", + "name": "vst2_p16", "arguments": [ "poly16_t * ptr", - "poly16x8x3_t val" + "poly16x4x2_t val" ], "return_type": { "value": "void" @@ -107494,8 +311135,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -107505,16 +311149,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_p64", + "name": "vst2_p64", "arguments": [ "poly64_t * ptr", - "poly64x2x3_t val" + "poly64x1x2_t val" ], "return_type": { "value": "void" @@ -107523,25 +311167,29 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_p8", + "name": "vst2_p8", "arguments": [ "poly8_t * ptr", - "poly8x16x3_t val" + "poly8x8x2_t val" ], "return_type": { "value": "void" @@ -107550,8 +311198,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -107561,16 +311212,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vst3q_s16", + "name": "vst2_s16", "arguments": [ "int16_t * ptr", - "int16x8x3_t val" + "int16x4x2_t val" ], "return_type": { "value": "void" @@ -107579,8 +311230,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -107590,16 +311244,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_s32", + "name": "vst2_s32", "arguments": [ "int32_t * ptr", - "int32x4x3_t val" + "int32x2x2_t val" ], "return_type": { "value": "void" @@ -107608,8 +311262,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ @@ -107619,16 +311276,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_s64", + "name": "vst2_s64", "arguments": [ "int64_t * ptr", - "int64x2x3_t val" + "int64x1x2_t val" ], "return_type": { "value": "void" @@ -107637,25 +311294,30 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_s8", + "name": "vst2_s8", "arguments": [ "int8_t * ptr", - "int8x16x3_t val" + "int8x8x2_t val" ], "return_type": { "value": "void" @@ -107664,8 +311326,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -107675,16 +311340,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_u16", + "name": "vst2_u16", "arguments": [ "uint16_t * ptr", - "uint16x8x3_t val" + "uint16x4x2_t val" ], "return_type": { "value": "void" @@ -107693,8 +311358,11 @@ "ptr": { "register": "Xn" }, - "val": 
{ - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -107704,16 +311372,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_u32", + "name": "vst2_u32", "arguments": [ "uint32_t * ptr", - "uint32x4x3_t val" + "uint32x2x2_t val" ], "return_type": { "value": "void" @@ -107722,8 +311390,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ @@ -107733,16 +311404,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_u64", + "name": "vst2_u64", "arguments": [ "uint64_t * ptr", - "uint64x2x3_t val" + "uint64x1x2_t val" ], "return_type": { "value": "void" @@ -107751,25 +311422,30 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_u8", + "name": "vst2_u8", "arguments": [ "uint8_t * ptr", - "uint8x16x3_t val" + "uint8x8x2_t val" ], "return_type": { "value": "void" @@ -107778,8 +311454,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -107789,16 +311468,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_f16", + "name": "vst2q_f16", "arguments": [ "float16_t * ptr", - "float16x4x4_t val" + "float16x8x2_t val" ], "return_type": { "value": "void" @@ -107807,8 +311486,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -107818,16 +311500,16 @@ ], "instructions": [ [ - "ST4" + 
"ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_f32", + "name": "vst2q_f32", "arguments": [ "float32_t * ptr", - "float32x2x4_t val" + "float32x4x2_t val" ], "return_type": { "value": "void" @@ -107836,8 +311518,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2S" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ @@ -107847,16 +311532,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_f64", + "name": "vst2q_f64", "arguments": [ "float64_t * ptr", - "float64x1x4_t val" + "float64x2x2_t val" ], "return_type": { "value": "void" @@ -107865,8 +311550,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ @@ -107874,16 +311562,16 @@ ], "instructions": [ [ - "ST1" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_f16", + "name": "vst2q_lane_f16", "arguments": [ "float16_t * ptr", - "float16x4x4_t val", + "float16x8x2_t val", "const int lane" ], "return_type": { @@ -107892,13 +311580,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -107908,16 +311599,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_f32", + "name": "vst2q_lane_f32", "arguments": [ "float32_t * ptr", - "float32x2x4_t val", + "float32x4x2_t val", "const int lane" ], "return_type": { @@ -107926,13 +311617,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2S" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ @@ -107942,16 +311636,16 @@ ], 
"instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_f64", + "name": "vst2q_lane_f64", "arguments": [ "float64_t * ptr", - "float64x1x4_t val", + "float64x2x2_t val", "const int lane" ], "return_type": { @@ -107960,13 +311654,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 0 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ @@ -107974,16 +311671,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_p16", + "name": "vst2q_lane_p16", "arguments": [ "poly16_t * ptr", - "poly16x4x4_t val", + "poly16x8x2_t val", "const int lane" ], "return_type": { @@ -107992,13 +311689,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -108008,16 +311708,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_p64", + "name": "vst2q_lane_p64", "arguments": [ "poly64_t * ptr", - "poly64x1x4_t val", + "poly64x2x2_t val", "const int lane" ], "return_type": { @@ -108026,13 +311726,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 0 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ @@ -108040,16 +311743,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_p8", + "name": "vst2q_lane_p8", "arguments": [ "poly8_t * ptr", - "poly8x8x4_t val", + "poly8x16x2_t val", "const int lane" ], "return_type": { @@ -108058,32 +311761,33 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 15 }, 
"ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8B" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_s16", + "name": "vst2q_lane_s16", "arguments": [ "int16_t * ptr", - "int16x4x4_t val", + "int16x8x2_t val", "const int lane" ], "return_type": { @@ -108092,13 +311796,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -108108,16 +311815,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_s32", + "name": "vst2q_lane_s32", "arguments": [ "int32_t * ptr", - "int32x2x4_t val", + "int32x4x2_t val", "const int lane" ], "return_type": { @@ -108126,13 +311833,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2S" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ @@ -108142,16 +311852,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_s64", + "name": "vst2q_lane_s64", "arguments": [ "int64_t * ptr", - "int64x1x4_t val", + "int64x2x2_t val", "const int lane" ], "return_type": { @@ -108160,13 +311870,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 0 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ @@ -108174,16 +311887,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_s8", + "name": "vst2q_lane_s8", "arguments": [ "int8_t * ptr", - 
"int8x8x4_t val", + "int8x16x2_t val", "const int lane" ], "return_type": { @@ -108192,32 +311905,33 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 15 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8B" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_u16", + "name": "vst2q_lane_u16", "arguments": [ "uint16_t * ptr", - "uint16x4x4_t val", + "uint16x8x2_t val", "const int lane" ], "return_type": { @@ -108226,13 +311940,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -108242,16 +311959,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_u32", + "name": "vst2q_lane_u32", "arguments": [ "uint32_t * ptr", - "uint32x2x4_t val", + "uint32x4x2_t val", "const int lane" ], "return_type": { @@ -108260,13 +311977,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2S" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ @@ -108276,16 +311996,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_u64", + "name": "vst2q_lane_u64", "arguments": [ "uint64_t * ptr", - "uint64x1x4_t val", + "uint64x2x2_t val", "const int lane" ], "return_type": { @@ -108294,13 +312014,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 0 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, 
"Architectures": [ @@ -108308,16 +312031,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_u8", + "name": "vst2q_lane_u8", "arguments": [ "uint8_t * ptr", - "uint8x8x4_t val", + "uint8x16x2_t val", "const int lane" ], "return_type": { @@ -108326,32 +312049,33 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 15 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8B" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_p16", + "name": "vst2q_p16", "arguments": [ "poly16_t * ptr", - "poly16x4x4_t val" + "poly16x8x2_t val" ], "return_type": { "value": "void" @@ -108360,8 +312084,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -108371,16 +312098,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_p64", + "name": "vst2q_p64", "arguments": [ "poly64_t * ptr", - "poly64x1x4_t val" + "poly64x2x2_t val" ], "return_type": { "value": "void" @@ -108389,26 +312116,28 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "ST1" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_p8", + "name": "vst2q_p8", "arguments": [ "poly8_t * ptr", - "poly8x8x4_t val" + "poly8x16x2_t val" ], "return_type": { "value": "void" @@ -108417,8 +312146,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8B" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ @@ -108428,16 +312160,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": 
"Neon", - "name": "vst4_s16", + "name": "vst2q_s16", "arguments": [ "int16_t * ptr", - "int16x4x4_t val" + "int16x8x2_t val" ], "return_type": { "value": "void" @@ -108446,8 +312178,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -108457,16 +312192,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_s32", + "name": "vst2q_s32", "arguments": [ "int32_t * ptr", - "int32x2x4_t val" + "int32x4x2_t val" ], "return_type": { "value": "void" @@ -108475,8 +312210,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2S" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ @@ -108486,16 +312224,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_s64", + "name": "vst2q_s64", "arguments": [ "int64_t * ptr", - "int64x1x4_t val" + "int64x2x2_t val" ], "return_type": { "value": "void" @@ -108504,27 +312242,28 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_s8", + "name": "vst2q_s8", "arguments": [ "int8_t * ptr", - "int8x8x4_t val" + "int8x16x2_t val" ], "return_type": { "value": "void" @@ -108533,8 +312272,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8B" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ @@ -108544,16 +312286,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_u16", + "name": "vst2q_u16", "arguments": [ "uint16_t * ptr", - "uint16x4x4_t val" + "uint16x8x2_t val" ], "return_type": { "value": "void" @@ -108562,8 +312304,11 @@ "ptr": { 
"register": "Xn" }, - "val": { - "register": "Vt4.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -108573,16 +312318,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_u32", + "name": "vst2q_u32", "arguments": [ "uint32_t * ptr", - "uint32x2x4_t val" + "uint32x4x2_t val" ], "return_type": { "value": "void" @@ -108591,8 +312336,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2S" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ @@ -108602,16 +312350,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_u64", + "name": "vst2q_u64", "arguments": [ "uint64_t * ptr", - "uint64x1x4_t val" + "uint64x2x2_t val" ], "return_type": { "value": "void" @@ -108620,27 +312368,28 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_u8", + "name": "vst2q_u8", "arguments": [ "uint8_t * ptr", - "uint8x8x4_t val" + "uint8x16x2_t val" ], "return_type": { "value": "void" @@ -108649,8 +312398,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8B" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ @@ -108660,16 +312412,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_f16", + "name": "vst3_f16", "arguments": [ "float16_t * ptr", - "float16x8x4_t val" + "float16x4x3_t val" ], "return_type": { "value": "void" @@ -108678,8 +312430,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, 
"Architectures": [ @@ -108689,16 +312447,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_f32", + "name": "vst3_f32", "arguments": [ "float32_t * ptr", - "float32x4x4_t val" + "float32x2x3_t val" ], "return_type": { "value": "void" @@ -108707,8 +312465,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" } }, "Architectures": [ @@ -108718,16 +312482,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_f64", + "name": "vst3_f64", "arguments": [ "float64_t * ptr", - "float64x2x4_t val" + "float64x1x3_t val" ], "return_type": { "value": "void" @@ -108736,8 +312500,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ @@ -108745,16 +312515,16 @@ ], "instructions": [ [ - "ST4" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_f16", + "name": "vst3_lane_f16", "arguments": [ "float16_t * ptr", - "float16x8x4_t val", + "float16x4x3_t val", "const int lane" ], "return_type": { @@ -108763,13 +312533,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -108779,16 +312555,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_f32", + "name": "vst3_lane_f32", "arguments": [ "float32_t * ptr", - "float32x4x4_t val", + "float32x2x3_t val", "const int lane" ], "return_type": { @@ -108797,13 +312573,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, 
"ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" } }, "Architectures": [ @@ -108813,16 +312595,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_f64", + "name": "vst3_lane_f64", "arguments": [ "float64_t * ptr", - "float64x2x4_t val", + "float64x1x3_t val", "const int lane" ], "return_type": { @@ -108831,13 +312613,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 0 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ @@ -108845,16 +312633,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_p16", + "name": "vst3_lane_p16", "arguments": [ "poly16_t * ptr", - "poly16x8x4_t val", + "poly16x4x3_t val", "const int lane" ], "return_type": { @@ -108863,13 +312651,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -108879,16 +312673,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_p64", + "name": "vst3_lane_p64", "arguments": [ "poly64_t * ptr", - "poly64x2x4_t val", + "poly64x1x3_t val", "const int lane" ], "return_type": { @@ -108897,13 +312691,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 0 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, 
"Architectures": [ @@ -108911,16 +312711,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_p8", + "name": "vst3_lane_p8", "arguments": [ "poly8_t * ptr", - "poly8x16x4_t val", + "poly8x8x3_t val", "const int lane" ], "return_type": { @@ -108929,30 +312729,38 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 15 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_s16", + "name": "vst3_lane_s16", "arguments": [ "int16_t * ptr", - "int16x8x4_t val", + "int16x4x3_t val", "const int lane" ], "return_type": { @@ -108961,13 +312769,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -108977,16 +312791,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_s32", + "name": "vst3_lane_s32", "arguments": [ "int32_t * ptr", - "int32x4x4_t val", + "int32x2x3_t val", "const int lane" ], "return_type": { @@ -108995,13 +312809,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" } }, "Architectures": [ @@ -109011,16 +312831,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_s64", + "name": "vst3_lane_s64", "arguments": [ "int64_t * ptr", - "int64x2x4_t val", + 
"int64x1x3_t val", "const int lane" ], "return_type": { @@ -109029,13 +312849,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 0 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ @@ -109043,16 +312869,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_s8", + "name": "vst3_lane_s8", "arguments": [ "int8_t * ptr", - "int8x16x4_t val", + "int8x8x3_t val", "const int lane" ], "return_type": { @@ -109061,30 +312887,38 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 15 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_u16", + "name": "vst3_lane_u16", "arguments": [ "uint16_t * ptr", - "uint16x8x4_t val", + "uint16x4x3_t val", "const int lane" ], "return_type": { @@ -109093,13 +312927,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -109109,16 +312949,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_u32", + "name": "vst3_lane_u32", "arguments": [ "uint32_t * ptr", - "uint32x4x4_t val", + "uint32x2x3_t val", "const int lane" ], "return_type": { @@ -109127,13 +312967,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": 
"Vt4.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" } }, "Architectures": [ @@ -109143,16 +312989,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_u64", + "name": "vst3_lane_u64", "arguments": [ "uint64_t * ptr", - "uint64x2x4_t val", + "uint64x1x3_t val", "const int lane" ], "return_type": { @@ -109161,13 +313007,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 0 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ @@ -109175,16 +313027,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_u8", + "name": "vst3_lane_u8", "arguments": [ "uint8_t * ptr", - "uint8x16x4_t val", + "uint8x8x3_t val", "const int lane" ], "return_type": { @@ -109193,30 +313045,38 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 15 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_p16", + "name": "vst3_p16", "arguments": [ "poly16_t * ptr", - "poly16x8x4_t val" + "poly16x4x3_t val" ], "return_type": { "value": "void" @@ -109225,8 +313085,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -109236,16 +313102,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_p64", + "name": "vst3_p64", 
"arguments": [ "poly64_t * ptr", - "poly64x2x4_t val" + "poly64x1x3_t val" ], "return_type": { "value": "void" @@ -109254,25 +313120,32 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "ST4" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_p8", + "name": "vst3_p8", "arguments": [ "poly8_t * ptr", - "poly8x16x4_t val" + "poly8x8x3_t val" ], "return_type": { "value": "void" @@ -109281,8 +313154,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ @@ -109292,16 +313171,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_s16", + "name": "vst3_s16", "arguments": [ "int16_t * ptr", - "int16x8x4_t val" + "int16x4x3_t val" ], "return_type": { "value": "void" @@ -109310,8 +313189,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -109321,16 +313206,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_s32", + "name": "vst3_s32", "arguments": [ "int32_t * ptr", - "int32x4x4_t val" + "int32x2x3_t val" ], "return_type": { "value": "void" @@ -109339,8 +313224,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" } }, "Architectures": [ @@ -109350,16 +313241,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_s64", + "name": "vst3_s64", "arguments": [ "int64_t * 
ptr", - "int64x2x4_t val" + "int64x1x3_t val" ], "return_type": { "value": "void" @@ -109368,25 +313259,33 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST4" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_s8", + "name": "vst3_s8", "arguments": [ "int8_t * ptr", - "int8x16x4_t val" + "int8x8x3_t val" ], "return_type": { "value": "void" @@ -109395,8 +313294,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ @@ -109406,16 +313311,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_u16", + "name": "vst3_u16", "arguments": [ "uint16_t * ptr", - "uint16x8x4_t val" + "uint16x4x3_t val" ], "return_type": { "value": "void" @@ -109424,8 +313329,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -109435,16 +313346,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_u32", + "name": "vst3_u32", "arguments": [ "uint32_t * ptr", - "uint32x4x4_t val" + "uint32x2x3_t val" ], "return_type": { "value": "void" @@ -109453,8 +313364,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" } }, "Architectures": [ @@ -109464,16 +313381,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_u64", + "name": "vst3_u64", "arguments": [ "uint64_t * ptr", - 
"uint64x2x4_t val" + "uint64x1x3_t val" ], "return_type": { "value": "void" @@ -109482,25 +313399,33 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST4" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_u8", + "name": "vst3_u8", "arguments": [ "uint8_t * ptr", - "uint8x16x4_t val" + "uint8x8x3_t val" ], "return_type": { "value": "void" @@ -109509,8 +313434,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ @@ -109520,16 +313451,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vstrq_p128", + "name": "vst3q_f16", "arguments": [ - "poly128_t * ptr", - "poly128_t val" + "float16_t * ptr", + "float16x8x3_t val" ], "return_type": { "value": "void" @@ -109538,105 +313469,84 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Qt" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "STR" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vstl1_lane_f64", + "name": "vst3q_f32", "arguments": [ - "float64_t * ptr", - "float64x1_t val", - "const int lane" + "float32_t * ptr", + "float32x4x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.1D" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "STL1" + "ST3" ] ] }, { 
"SIMD_ISA": "Neon", - "name": "vstl1q_lane_f64", + "name": "vst3q_f64", "arguments": [ "float64_t * ptr", - "float64x2_t val", - "const int lane" + "float64x2x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { "register": "Vt.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STL1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vstl1_lane_p64", - "arguments": [ - "poly64_t * ptr", - "poly64x1_t val", - "const int lane" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, - "ptr": { - "register": "Xn" }, - "val": { - "register": "Vt.1D" + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ @@ -109644,16 +313554,16 @@ ], "instructions": [ [ - "STL1" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vstl1q_lane_p64", + "name": "vst3q_lane_f16", "arguments": [ - "poly64_t * ptr", - "poly64x2_t val", + "float16_t * ptr", + "float16x8x3_t val", "const int lane" ], "return_type": { @@ -109662,30 +313572,38 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.2D" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "STL1" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vstl1_lane_u64", + "name": "vst3q_lane_f32", "arguments": [ - "uint64_t * ptr", - "uint64x1_t val", + "float32_t * ptr", + "float32x4x3_t val", "const int lane" ], "return_type": { @@ -109694,30 +313612,38 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 0 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.1D" + "val.val[0]": { + "register": "Vt.4S" + 
}, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "STL1" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vstl1q_lane_u64", + "name": "vst3q_lane_f64", "arguments": [ - "uint64_t * ptr", - "uint64x2_t val", + "float64_t * ptr", + "float64x2x3_t val", "const int lane" ], "return_type": { @@ -109731,8 +313657,14 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ @@ -109740,16 +313672,16 @@ ], "instructions": [ [ - "STL1" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vstl1_lane_s64", + "name": "vst3q_lane_p16", "arguments": [ - "int64_t * ptr", - "int64x1_t val", + "poly16_t * ptr", + "poly16x8x3_t val", "const int lane" ], "return_type": { @@ -109758,30 +313690,38 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 0 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.1D" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "STL1" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vstl1q_lane_s64", + "name": "vst3q_lane_p64", "arguments": [ - "int64_t * ptr", - "int64x2_t val", + "poly64_t * ptr", + "poly64x2x3_t val", "const int lane" ], "return_type": { @@ -109795,8 +313735,14 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ @@ -109804,54 +313750,77 @@ ], "instructions": [ [ - "STL1" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_f16", + "name": "vst3q_lane_p8", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "poly8_t * ptr", + "poly8x16x3_t val", + "const int lane" ], 
"return_type": { - "value": "float16x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 15 }, - "b": { - "register": "Vm.4H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FSUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_f32", + "name": "vst3q_lane_s16", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "int16_t * ptr", + "int16x8x3_t val", + "const int lane" ], "return_type": { - "value": "float32x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ @@ -109861,53 +313830,37 @@ ], "instructions": [ [ - "FSUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_f64", + "name": "vst3q_lane_s32", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "int32_t * ptr", + "int32x4x3_t val", + "const int lane" ], "return_type": { - "value": "float64x1_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Dm" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FSUB" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vsub_s16", - "arguments": [ - "int16x4_t a", - "int16x4_t b" - ], - "return_type": { - "value": "int16x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.4H" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" } }, 
"Architectures": [ @@ -109917,55 +313870,75 @@ ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_s32", + "name": "vst3q_lane_s64", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "int64_t * ptr", + "int64x2x3_t val", + "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_s64", + "name": "vst3q_lane_s8", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "int8_t * ptr", + "int8x16x3_t val", + "const int lane" ], "return_type": { - "value": "int64x1_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "lane": { + "minimum": 0, + "maximum": 15 }, - "b": { - "register": "Dm" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" } }, "Architectures": [ @@ -109975,26 +313948,37 @@ ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_s8", + "name": "vst3q_lane_u16", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint16_t * ptr", + "uint16x8x3_t val", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.8B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ @@ -110004,26 +313988,37 @@ ], "instructions": [ [ - "SUB" + 
"ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_u16", + "name": "vst3q_lane_u32", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "uint32_t * ptr", + "uint32x4x3_t val", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.4H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" } }, "Architectures": [ @@ -110033,55 +314028,75 @@ ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_u32", + "name": "vst3q_lane_u64", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "uint64_t * ptr", + "uint64x2x3_t val", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_u64", + "name": "vst3q_lane_u8", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "uint8_t * ptr", + "uint8x16x3_t val", + "const int lane" ], "return_type": { - "value": "uint64x1_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "lane": { + "minimum": 0, + "maximum": 15 }, - "b": { - "register": "Dm" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" } }, "Architectures": [ @@ -110091,26 +314106,32 @@ ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_u8", + 
"name": "vst3q_p16", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "poly16_t * ptr", + "poly16x8x3_t val" ], "return_type": { - "value": "uint8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8B" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ @@ -110120,26 +314141,32 @@ ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubd_s64", + "name": "vst3q_p64", "arguments": [ - "int64_t a", - "int64_t b" + "poly64_t * ptr", + "poly64x2x3_t val" ], "return_type": { - "value": "int64_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Dm" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ @@ -110147,116 +314174,137 @@ ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubd_u64", + "name": "vst3q_p8", "arguments": [ - "uint64_t a", - "uint64_t b" + "poly8_t * ptr", + "poly8x16x3_t val" ], "return_type": { - "value": "uint64_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Dm" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubh_f16", + "name": "vst3q_s16", "arguments": [ - "float16_t a", - "float16_t b" + "int16_t * ptr", + "int16x8x3_t val" ], "return_type": { - "value": "float16_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Hm" + "val.val[0]": 
{ + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FSUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_high_s16", + "name": "vst3q_s32", "arguments": [ - "int8x8_t r", - "int16x8_t a", - "int16x8_t b" + "int32_t * ptr", + "int32x4x3_t val" ], "return_type": { - "value": "int8x16_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8H" + "val.val[0]": { + "register": "Vt.4S" }, - "r": { - "register": "Vd.8B" + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SUBHN2" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_high_s32", + "name": "vst3q_s64", "arguments": [ - "int16x4_t r", - "int32x4_t a", - "int32x4_t b" + "int64_t * ptr", + "int64x2x3_t val" ], "return_type": { - "value": "int16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.4S" + "val.val[0]": { + "register": "Vt.2D" }, - "r": { - "register": "Vd.4H" + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ @@ -110264,123 +314312,137 @@ ], "instructions": [ [ - "SUBHN2" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_high_s64", + "name": "vst3q_s8", "arguments": [ - "int32x2_t r", - "int64x2_t a", - "int64x2_t b" + "int8_t * ptr", + "int8x16x3_t val" ], "return_type": { - "value": "int32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.2D" + "val.val[0]": { + "register": "Vt.16B" }, - "r": { - "register": "Vd.2S" + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" } }, 
"Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SUBHN2" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_high_u16", + "name": "vst3q_u16", "arguments": [ - "uint8x8_t r", - "uint16x8_t a", - "uint16x8_t b" + "uint16_t * ptr", + "uint16x8x3_t val" ], "return_type": { - "value": "uint8x16_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8H" + "val.val[0]": { + "register": "Vt.8H" }, - "r": { - "register": "Vd.8B" + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SUBHN2" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_high_u32", + "name": "vst3q_u32", "arguments": [ - "uint16x4_t r", - "uint32x4_t a", - "uint32x4_t b" + "uint32_t * ptr", + "uint32x4x3_t val" ], "return_type": { - "value": "uint16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.4S" + "val.val[0]": { + "register": "Vt.4S" }, - "r": { - "register": "Vd.4H" + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SUBHN2" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_high_u64", + "name": "vst3q_u64", "arguments": [ - "uint32x2_t r", - "uint64x2_t a", - "uint64x2_t b" + "uint64_t * ptr", + "uint64x2x3_t val" ], "return_type": { - "value": "uint32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.2D" + "val.val[0]": { + "register": "Vt.2D" }, - "r": { - "register": "Vd.2S" + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ @@ -110388,26 +314450,32 @@ ], "instructions": [ [ - "SUBHN2" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - 
"name": "vsubhn_s16", + "name": "vst3q_u8", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "uint8_t * ptr", + "uint8x16x3_t val" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8H" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" } }, "Architectures": [ @@ -110417,26 +314485,35 @@ ], "instructions": [ [ - "SUBHN" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_s32", + "name": "vst4_f16", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "float16_t * ptr", + "float16x4x4_t val" ], "return_type": { - "value": "int16x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.4S" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ @@ -110446,26 +314523,35 @@ ], "instructions": [ [ - "SUBHN" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_s64", + "name": "vst4_f32", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "float32_t * ptr", + "float32x2x4_t val" ], "return_type": { - "value": "int32x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.2D" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ @@ -110475,55 +314561,76 @@ ], "instructions": [ [ - "SUBHN" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_u16", + "name": "vst4_f64", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "float64_t * ptr", + "float64x1x4_t val" ], "return_type": { - "value": 
"uint8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8H" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SUBHN" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_u32", + "name": "vst4_lane_f16", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "float16_t * ptr", + "float16x4x4_t val", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.4S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ @@ -110533,26 +314640,40 @@ ], "instructions": [ [ - "SUBHN" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_u64", + "name": "vst4_lane_f32", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "float32_t * ptr", + "float32x2x4_t val", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.2D" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ @@ -110562,26 +314683,40 @@ ], "instructions": [ [ - "SUBHN" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_high_s16", + "name": "vst4_lane_f64", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "float64_t * ptr", + "float64x1x4_t val", + 
"const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "lane": { + "minimum": 0, + "maximum": 0 }, - "b": { - "register": "Vm.8H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ @@ -110589,53 +314724,83 @@ ], "instructions": [ [ - "SSUBL2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_high_s32", + "name": "vst4_lane_p16", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "poly16_t * ptr", + "poly16x4x4_t val", + "const int lane" ], "return_type": { - "value": "int64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.4S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSUBL2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_high_s8", + "name": "vst4_lane_p64", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "poly64_t * ptr", + "poly64x1x4_t val", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 0 }, - "b": { - "register": "Vm.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ @@ -110643,136 +314808,210 @@ ], "instructions": [ [ - "SSUBL2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_high_u16", + "name": 
"vst4_lane_p8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "poly8_t * ptr", + "poly8x8x4_t val", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.8H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USUBL2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_high_u32", + "name": "vst4_lane_s16", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int16_t * ptr", + "int16x4x4_t val", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.4S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USUBL2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_high_u8", + "name": "vst4_lane_s32", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "int32_t * ptr", + "int32x2x4_t val", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ 
- "USUBL2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_s16", + "name": "vst4_lane_s64", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "int64_t * ptr", + "int64x1x4_t val", + "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 0 }, - "b": { - "register": "Vm.4H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSUBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_s32", + "name": "vst4_lane_s8", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "int8_t * ptr", + "int8x8x4_t val", + "const int lane" ], "return_type": { - "value": "int64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ @@ -110782,26 +315021,40 @@ ], "instructions": [ [ - "SSUBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_s8", + "name": "vst4_lane_u16", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint16_t * ptr", + "uint16x4x4_t val", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.8B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, 
"Architectures": [ @@ -110811,26 +315064,40 @@ ], "instructions": [ [ - "SSUBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_u16", + "name": "vst4_lane_u32", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "uint32_t * ptr", + "uint32x2x4_t val", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.4H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ @@ -110840,55 +315107,81 @@ ], "instructions": [ [ - "USUBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_u32", + "name": "vst4_lane_u64", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "uint64_t * ptr", + "uint64x1x4_t val", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 0 }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USUBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_u8", + "name": "vst4_lane_u8", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "uint8_t * ptr", + "uint8x8x4_t val", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.8B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + 
"val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ @@ -110898,110 +315191,148 @@ ], "instructions": [ [ - "USUBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_f16", + "name": "vst4_p16", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "poly16_t * ptr", + "poly16x4x4_t val" ], "return_type": { - "value": "float16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FSUB" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_f32", + "name": "vst4_p64", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "poly64_t * ptr", + "poly64x1x4_t val" ], "return_type": { - "value": "float32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.4S" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "FSUB" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_f64", + "name": "vst4_p8", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "poly8_t * ptr", + "poly8x8x4_t val" ], "return_type": { - "value": "float64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.2D" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], 
"instructions": [ [ - "FSUB" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_s16", + "name": "vst4_s16", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int16_t * ptr", + "int16x4x4_t val" ], "return_type": { - "value": "int16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ @@ -111011,26 +315342,35 @@ ], "instructions": [ [ - "SUB" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_s32", + "name": "vst4_s32", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int32_t * ptr", + "int32x2x4_t val" ], "return_type": { - "value": "int32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ @@ -111040,26 +315380,35 @@ ], "instructions": [ [ - "SUB" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_s64", + "name": "vst4_s64", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "int64_t * ptr", + "int64x1x4_t val" ], "return_type": { - "value": "int64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ @@ -111069,26 +315418,35 @@ ], "instructions": [ [ - "SUB" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_s8", + "name": "vst4_s8", "arguments": [ - "int8x16_t a", - 
"int8x16_t b" + "int8_t * ptr", + "int8x8x4_t val" ], "return_type": { - "value": "int8x16_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ @@ -111098,26 +315456,35 @@ ], "instructions": [ [ - "SUB" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_u16", + "name": "vst4_u16", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "uint16_t * ptr", + "uint16x4x4_t val" ], "return_type": { - "value": "uint16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ @@ -111127,26 +315494,35 @@ ], "instructions": [ [ - "SUB" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_u32", + "name": "vst4_u32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "uint32_t * ptr", + "uint32x2x4_t val" ], "return_type": { - "value": "uint32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ @@ -111156,26 +315532,35 @@ ], "instructions": [ [ - "SUB" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_u64", + "name": "vst4_u64", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "uint64_t * ptr", + "uint64x1x4_t val" ], "return_type": { - "value": "uint64x2_t" + "value": "void" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ @@ -111185,26 +315570,35 @@ ], "instructions": [ [ - "SUB" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_u8", + "name": "vst4_u8", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "uint8_t * ptr", + "uint8x8x4_t val" ], "return_type": { - "value": "uint8x16_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ @@ -111214,80 +315608,111 @@ ], "instructions": [ [ - "SUB" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_high_s16", + "name": "vst4q_f16", "arguments": [ - "int32x4_t a", - "int16x8_t b" + "float16_t * ptr", + "float16x8x4_t val" ], "return_type": { - "value": "int32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSUBW2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_high_s32", + "name": "vst4q_f32", "arguments": [ - "int64x2_t a", - "int32x4_t b" + "float32_t * ptr", + "float32x4x4_t val" ], "return_type": { - "value": "int64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.4S" + 
"val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { + "register": "Vt4.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSUBW2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_high_s8", + "name": "vst4q_f64", "arguments": [ - "int16x8_t a", - "int8x16_t b" + "float64_t * ptr", + "float64x2x4_t val" ], "return_type": { - "value": "int16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.16B" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ @@ -111295,80 +315720,126 @@ ], "instructions": [ [ - "SSUBW2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_high_u16", + "name": "vst4q_lane_f16", "arguments": [ - "uint32x4_t a", - "uint16x8_t b" + "float16_t * ptr", + "float16x8x4_t val", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.8H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USUBW2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_high_u32", + "name": "vst4q_lane_f32", "arguments": [ - "uint64x2_t a", - "uint32x4_t b" + "float32_t * ptr", + "float32x4x4_t val", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.4S" + 
"ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { + "register": "Vt4.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USUBW2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_high_u8", + "name": "vst4q_lane_f64", "arguments": [ - "uint16x8_t a", - "uint8x16_t b" + "float64_t * ptr", + "float64x2x4_t val", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ @@ -111376,26 +315847,40 @@ ], "instructions": [ [ - "USUBW2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_s16", + "name": "vst4q_lane_p16", "arguments": [ - "int32x4_t a", - "int16x4_t b" + "poly16_t * ptr", + "poly16x8x4_t val", + "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.4H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ @@ -111405,84 +315890,122 @@ ], "instructions": [ [ - "SSUBW" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_s32", + "name": "vst4q_lane_p64", "arguments": [ - "int64x2_t a", - "int32x2_t b" + "poly64_t * ptr", + "poly64x2x4_t val", + "const int lane" ], "return_type": { - "value": "int64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - 
"register": "Vn.2D" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSUBW" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_s8", + "name": "vst4q_lane_p8", "arguments": [ - "int16x8_t a", - "int8x8_t b" + "poly8_t * ptr", + "poly8x16x4_t val", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "lane": { + "minimum": 0, + "maximum": 15 }, - "b": { - "register": "Vm.8B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { + "register": "Vt4.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSUBW" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_u16", + "name": "vst4q_lane_s16", "arguments": [ - "uint32x4_t a", - "uint16x4_t b" + "int16_t * ptr", + "int16x8x4_t val", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.4H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ @@ -111492,26 +316015,40 @@ ], "instructions": [ [ - "USUBW" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_u32", + "name": "vst4q_lane_s32", "arguments": [ - "uint64x2_t a", - "uint32x2_t b" + "int32_t * ptr", + "int32x4x4_t val", + "const int lane" ], "return_type": { - 
"value": "uint64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { + "register": "Vt4.4S" } }, "Architectures": [ @@ -111521,174 +316058,208 @@ ], "instructions": [ [ - "USUBW" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_u8", + "name": "vst4q_lane_s64", "arguments": [ - "uint16x8_t a", - "uint8x8_t b" + "int64_t * ptr", + "int64x2x4_t val", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.8B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USUBW" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsudot_lane_s32", + "name": "vst4q_lane_s8", "arguments": [ - "int32x2_t r", - "int8x8_t a", - "uint8x8_t b", + "int8_t * ptr", + "int8x16x4_t val", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.4B" - }, "lane": { "minimum": 0, - "maximum": 1 + "maximum": 15 }, - "r": { - "register": "Vd.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { + "register": "Vt4.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SUDOT" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsudot_laneq_s32", + "name": 
"vst4q_lane_u16", "arguments": [ - "int32x2_t r", - "int8x8_t a", - "uint8x16_t b", + "uint16_t * ptr", + "uint16x8x4_t val", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.4B" - }, "lane": { "minimum": 0, - "maximum": 3 + "maximum": 7 }, - "r": { - "register": "Vd.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SUDOT" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsudotq_lane_s32", + "name": "vst4q_lane_u32", "arguments": [ - "int32x4_t r", - "int8x16_t a", - "uint8x8_t b", + "uint32_t * ptr", + "uint32x4x4_t val", "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.4B" - }, "lane": { "minimum": 0, - "maximum": 1 + "maximum": 3 }, - "r": { - "register": "Vd.4S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { + "register": "Vt4.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "SUDOT" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsudotq_laneq_s32", + "name": "vst4q_lane_u64", "arguments": [ - "int32x4_t r", - "int8x16_t a", - "uint8x16_t b", + "uint64_t * ptr", + "uint64x2x4_t val", "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.4B" - }, "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, - "r": { - "register": "Vd.4S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2D" + 
}, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ @@ -111696,52 +316267,77 @@ ], "instructions": [ [ - "SUDOT" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl1_p8", + "name": "vst4q_lane_u8", "arguments": [ - "poly8x8_t a", - "uint8x8_t idx" + "uint8_t * ptr", + "uint8x16x4_t val", + "const int lane" ], "return_type": { - "value": "poly8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 15 + }, + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" }, - "idx": {} + "val.val[3]": { + "register": "Vt4.16B" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl1_s8", + "name": "vst4q_p16", "arguments": [ - "int8x8_t a", - "int8x8_t idx" + "poly16_t * ptr", + "poly16x8x4_t val" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" }, - "idx": {} + "val.val[3]": { + "register": "Vt4.8H" + } }, "Architectures": [ "v7", @@ -111750,52 +316346,72 @@ ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl1_u8", + "name": "vst4q_p64", "arguments": [ - "uint8x8_t a", - "uint8x8_t idx" + "poly64_t * ptr", + "poly64x2x4_t val" ], "return_type": { - "value": "uint8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" }, - "idx": {} + "val.val[3]": { 
+ "register": "Vt4.2D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl2_p8", + "name": "vst4q_p8", "arguments": [ - "poly8x8x2_t a", - "uint8x8_t idx" + "poly8_t * ptr", + "poly8x16x4_t val" ], "return_type": { - "value": "poly8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" }, - "idx": {} + "val.val[3]": { + "register": "Vt4.16B" + } }, "Architectures": [ "v7", @@ -111804,25 +316420,36 @@ ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl2_s8", + "name": "vst4q_s16", "arguments": [ - "int8x8x2_t a", - "int8x8_t idx" + "int16_t * ptr", + "int16x8x4_t val" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" }, - "idx": {} + "val.val[3]": { + "register": "Vt4.8H" + } }, "Architectures": [ "v7", @@ -111831,25 +316458,36 @@ ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl2_u8", + "name": "vst4q_s32", "arguments": [ - "uint8x8x2_t a", - "uint8x8_t idx" + "int32_t * ptr", + "int32x4x4_t val" ], "return_type": { - "value": "uint8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" }, - "idx": {} + "val.val[3]": { + "register": "Vt4.4S" + } }, "Architectures": [ "v7", @@ -111858,52 +316496,72 @@ ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl3_p8", + 
"name": "vst4q_s64", "arguments": [ - "poly8x8x3_t a", - "uint8x8_t idx" + "int64_t * ptr", + "int64x2x4_t val" ], "return_type": { - "value": "poly8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" }, - "idx": {} + "val.val[3]": { + "register": "Vt4.2D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl3_s8", + "name": "vst4q_s8", "arguments": [ - "int8x8x3_t a", - "int8x8_t idx" + "int8_t * ptr", + "int8x16x4_t val" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" }, - "idx": {} + "val.val[3]": { + "register": "Vt4.16B" + } }, "Architectures": [ "v7", @@ -111912,25 +316570,36 @@ ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl3_u8", + "name": "vst4q_u16", "arguments": [ - "uint8x8x3_t a", - "uint8x8_t idx" + "uint16_t * ptr", + "uint16x8x4_t val" ], "return_type": { - "value": "uint8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" }, - "idx": {} + "val.val[3]": { + "register": "Vt4.8H" + } }, "Architectures": [ "v7", @@ -111939,25 +316608,36 @@ ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl4_p8", + "name": "vst4q_u32", "arguments": [ - "poly8x8x4_t a", - "uint8x8_t idx" + "uint32_t * ptr", + "uint32x4x4_t val" ], "return_type": { - "value": "poly8x8_t" + "value": "void" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" }, - "idx": {} + "val.val[3]": { + "register": "Vt4.4S" + } }, "Architectures": [ "v7", @@ -111966,52 +316646,72 @@ ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl4_s8", + "name": "vst4q_u64", "arguments": [ - "int8x8x4_t a", - "int8x8_t idx" + "uint64_t * ptr", + "uint64x2x4_t val" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" }, - "idx": {} + "val.val[3]": { + "register": "Vt4.2D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl4_u8", + "name": "vst4q_u8", "arguments": [ - "uint8x8x4_t a", - "uint8x8_t idx" + "uint8_t * ptr", + "uint8x16x4_t val" ], "return_type": { - "value": "uint8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" }, - "idx": {} + "val.val[3]": { + "register": "Vt4.16B" + } }, "Architectures": [ "v7", @@ -112020,335 +316720,339 @@ ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx1_p8", + "name": "vstl1_lane_f64", "arguments": [ - "poly8x8_t a", - "poly8x8_t b", - "uint8x8_t idx" + "float64_t * ptr", + "float64x1_t val", + "const int lane" ], "return_type": { - "value": "poly8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 0 }, - "idx": {} + 
"ptr": { + "register": "Xn" + }, + "val": { + "register": "Vt.1D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOVI", - "CMHS", - "TBL", - "BIF" + "STL1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx1_s8", + "name": "vstl1_lane_p64", "arguments": [ - "int8x8_t a", - "int8x8_t b", - "int8x8_t idx" + "poly64_t * ptr", + "poly64x1_t val", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 0 }, - "idx": {} + "ptr": { + "register": "Xn" + }, + "val": { + "register": "Vt.1D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOVI", - "CMHS", - "TBL", - "BIF" + "STL1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx1_u8", + "name": "vstl1_lane_s64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b", - "uint8x8_t idx" + "int64_t * ptr", + "int64x1_t val", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 0 }, - "idx": {} + "ptr": { + "register": "Xn" + }, + "val": { + "register": "Vt.1D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOVI", - "CMHS", - "TBL", - "BIF" + "STL1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx2_p8", + "name": "vstl1_lane_u64", "arguments": [ - "poly8x8_t a", - "poly8x8x2_t b", - "uint8x8_t idx" + "uint64_t * ptr", + "uint64x1_t val", + "const int lane" ], "return_type": { - "value": "poly8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 0 }, - "idx": {} + "ptr": { + "register": "Xn" + }, + "val": { + "register": "Vt.1D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TBX" + "STL1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx2_s8", + "name": "vstl1q_lane_f64", "arguments": [ - 
"int8x8_t a", - "int8x8x2_t b", - "int8x8_t idx" + "float64_t * ptr", + "float64x2_t val", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" }, - "idx": {} + "val": { + "register": "Vt.2D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TBX" + "STL1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx2_u8", + "name": "vstl1q_lane_p64", "arguments": [ - "uint8x8_t a", - "uint8x8x2_t b", - "uint8x8_t idx" + "poly64_t * ptr", + "poly64x2_t val", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" }, - "idx": {} + "val": { + "register": "Vt.2D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TBX" + "STL1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx3_p8", + "name": "vstl1q_lane_s64", "arguments": [ - "poly8x8_t a", - "poly8x8x3_t b", - "uint8x8_t idx" + "int64_t * ptr", + "int64x2_t val", + "const int lane" ], "return_type": { - "value": "poly8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" }, - "idx": {} + "val": { + "register": "Vt.2D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOVI", - "CMHS", - "TBL", - "BIF" + "STL1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx3_s8", + "name": "vstl1q_lane_u64", "arguments": [ - "int8x8_t a", - "int8x8x3_t b", - "int8x8_t idx" + "uint64_t * ptr", + "uint64x2_t val", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + 
"register": "Xn" }, - "idx": {} + "val": { + "register": "Vt.2D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOVI", - "CMHS", - "TBL", - "BIF" + "STL1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx3_u8", + "name": "vstrq_p128", "arguments": [ - "uint8x8_t a", - "uint8x8x3_t b", - "uint8x8_t idx" + "poly128_t * ptr", + "poly128_t val" ], "return_type": { - "value": "uint8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" }, - "idx": {} + "val": { + "register": "Qt" + } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "MOVI", - "CMHS", - "TBL", - "BIF" + "STR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx4_p8", + "name": "vsub_f16", "arguments": [ - "poly8x8_t a", - "poly8x8x4_t b", - "uint8x8_t idx" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "a": { + "register": "Vn.4H" }, - "idx": {} + "b": { + "register": "Vm.4H" + } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "TBX" + "FSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx4_s8", + "name": "vsub_f32", "arguments": [ - "int8x8_t a", - "int8x8x4_t b", - "int8x8_t idx" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int8x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "a": { + "register": "Vn.2S" }, - "idx": {} + "b": { + "register": "Vm.2S" + } }, "Architectures": [ "v7", @@ -112357,48 +317061,46 @@ ], "instructions": [ [ - "TBX" + "FSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx4_u8", + "name": "vsub_f64", "arguments": [ - "uint8x8_t a", - "uint8x8x4_t b", - "uint8x8_t idx" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "float64x1_t" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "a": { + 
"register": "Dn" }, - "idx": {} + "b": { + "register": "Dm" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TBX" + "FSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_f16", + "name": "vsub_s16", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "float16x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -112409,23 +317111,25 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_f32", + "name": "vsub_s32", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -112436,50 +317140,54 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_p16", + "name": "vsub_s64", "arguments": [ - "poly16x4_t a", - "poly16x4_t b" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "poly16x4_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Dn" }, "b": { - "register": "Vm.4H" + "register": "Dm" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_p8", + "name": "vsub_s8", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "poly8x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { @@ -112490,23 +317198,25 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_s16", + "name": "vsub_u16", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "int16x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { @@ -112517,23 
+317227,25 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_s32", + "name": "vsub_u32", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "int32x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { @@ -112544,23 +317256,54 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_s8", + "name": "vsub_u64", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint64x1_t a", + "uint64x1_t b" ], "return_type": { - "value": "int8x8_t" + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vsub_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { @@ -112571,30 +317314,32 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_u16", + "name": "vsubd_s64", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "int64_t a", + "int64_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Dn" }, "b": { - "register": "Vm.4H" + "register": "Dm" } }, "Architectures": [ @@ -112602,26 +317347,26 @@ ], "instructions": [ [ - "TRN1" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_u32", + "name": "vsubd_u64", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "uint64_t a", + "uint64_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Dn" }, "b": { - "register": "Vm.2S" + "register": "Dm" } }, 
"Architectures": [ @@ -112629,46 +317374,48 @@ ], "instructions": [ [ - "TRN1" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_u8", + "name": "vsubh_f16", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "float16_t a", + "float16_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Hn" }, "b": { - "register": "Vm.8B" + "register": "Hm" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "TRN1" + "FSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_f16", + "name": "vsubhn_high_s16", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "int8x8_t r", + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "float16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { @@ -112676,6 +317423,9 @@ }, "b": { "register": "Vm.8H" + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -112683,19 +317433,20 @@ ], "instructions": [ [ - "TRN1" + "SUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_f32", + "name": "vsubhn_high_s32", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "int16x4_t r", + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -112703,6 +317454,9 @@ }, "b": { "register": "Vm.4S" + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -112710,19 +317464,20 @@ ], "instructions": [ [ - "TRN1" + "SUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_f64", + "name": "vsubhn_high_s64", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "int32x2_t r", + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "float64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -112730,6 +317485,9 @@ }, "b": { "register": "Vm.2D" + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -112737,19 +317495,20 @@ ], "instructions": [ [ - "TRN1" + "SUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_p16", + 
"name": "vsubhn_high_u16", "arguments": [ - "poly16x8_t a", - "poly16x8_t b" + "uint8x8_t r", + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { @@ -112757,6 +317516,9 @@ }, "b": { "register": "Vm.8H" + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -112764,26 +317526,30 @@ ], "instructions": [ [ - "TRN1" + "SUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_p64", + "name": "vsubhn_high_u32", "arguments": [ - "poly64x2_t a", - "poly64x2_t b" + "uint16x4_t r", + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "poly64x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4S" }, "b": { - "register": "Vm.2D" + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -112791,26 +317557,30 @@ ], "instructions": [ [ - "TRN1" + "SUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_p8", + "name": "vsubhn_high_u64", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "uint32x2_t r", + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2D" }, "b": { - "register": "Vm.16B" + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -112818,19 +317588,19 @@ ], "instructions": [ [ - "TRN1" + "SUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_s16", + "name": "vsubhn_s16", "arguments": [ "int16x8_t a", "int16x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { @@ -112841,23 +317611,25 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_s32", + "name": "vsubhn_s32", "arguments": [ "int32x4_t a", "int32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int16x4_t" }, 
"Arguments_Preparation": { "a": { @@ -112868,23 +317640,25 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_s64", + "name": "vsubhn_s64", "arguments": [ "int64x2_t a", "int64x2_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -112895,111 +317669,119 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_s8", + "name": "vsubhn_u16", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, "b": { - "register": "Vm.16B" + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_u16", + "name": "vsubhn_u32", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4S" }, "b": { - "register": "Vm.8H" + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_u32", + "name": "vsubhn_u64", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2D" }, "b": { - "register": "Vm.4S" + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_u64", + "name": "vsubl_high_s16", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "int16x8_t 
a", + "int16x8_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.8H" }, "b": { - "register": "Vm.2D" + "register": "Vm.8H" } }, "Architectures": [ @@ -113007,26 +317789,26 @@ ], "instructions": [ [ - "TRN1" + "SSUBL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_u8", + "name": "vsubl_high_s32", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, "b": { - "register": "Vm.16B" + "register": "Vm.4S" } }, "Architectures": [ @@ -113034,26 +317816,26 @@ ], "instructions": [ [ - "TRN1" + "SSUBL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_f16", + "name": "vsubl_high_s8", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "float16x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.16B" }, "b": { - "register": "Vm.4H" + "register": "Vm.16B" } }, "Architectures": [ @@ -113061,26 +317843,26 @@ ], "instructions": [ [ - "TRN2" + "SSUBL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_f32", + "name": "vsubl_high_u16", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "float32x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.8H" }, "b": { - "register": "Vm.2S" + "register": "Vm.8H" } }, "Architectures": [ @@ -113088,26 +317870,26 @@ ], "instructions": [ [ - "TRN2" + "USUBL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_p16", + "name": "vsubl_high_u32", "arguments": [ - "poly16x4_t a", - "poly16x4_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "poly16x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": 
"Vn.4H" + "register": "Vn.4S" }, "b": { - "register": "Vm.4H" + "register": "Vm.4S" } }, "Architectures": [ @@ -113115,26 +317897,26 @@ ], "instructions": [ [ - "TRN2" + "USUBL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_p8", + "name": "vsubl_high_u8", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "poly8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.16B" }, "b": { - "register": "Vm.8B" + "register": "Vm.16B" } }, "Architectures": [ @@ -113142,19 +317924,19 @@ ], "instructions": [ [ - "TRN2" + "USUBL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_s16", + "name": "vsubl_s16", "arguments": [ "int16x4_t a", "int16x4_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -113165,23 +317947,25 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SSUBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_s32", + "name": "vsubl_s32", "arguments": [ "int32x2_t a", "int32x2_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { @@ -113192,23 +317976,25 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SSUBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_s8", + "name": "vsubl_s8", "arguments": [ "int8x8_t a", "int8x8_t b" ], "return_type": { - "value": "int8x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -113219,23 +318005,25 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SSUBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_u16", + "name": "vsubl_u16", "arguments": [ "uint16x4_t a", "uint16x4_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { @@ -113246,23 +318034,25 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "USUBL" ] ] 
}, { "SIMD_ISA": "Neon", - "name": "vtrn2_u32", + "name": "vsubl_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { @@ -113273,23 +318063,25 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "USUBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_u8", + "name": "vsubl_u8", "arguments": [ "uint8x8_t a", "uint8x8_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { @@ -113300,17 +318092,19 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "USUBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_f16", + "name": "vsubq_f16", "arguments": [ "float16x8_t a", "float16x8_t b" @@ -113327,17 +318121,18 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "TRN2" + "FSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_f32", + "name": "vsubq_f32", "arguments": [ "float32x4_t a", "float32x4_t b" @@ -113354,17 +318149,19 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "FSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_f64", + "name": "vsubq_f64", "arguments": [ "float64x2_t a", "float64x2_t b" @@ -113385,19 +318182,19 @@ ], "instructions": [ [ - "TRN2" + "FSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_p16", + "name": "vsubq_s16", "arguments": [ - "poly16x8_t a", - "poly16x8_t b" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -113408,219 +318205,235 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_p64", + "name": "vsubq_s32", "arguments": [ - "poly64x2_t a", - "poly64x2_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "poly64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": 
"Vn.2D" + "register": "Vn.4S" }, "b": { - "register": "Vm.2D" + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_p8", + "name": "vsubq_s64", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2D" }, "b": { - "register": "Vm.16B" + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_s16", + "name": "vsubq_s8", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, "b": { - "register": "Vm.8H" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_s32", + "name": "vsubq_u16", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8H" }, "b": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_s64", + "name": "vsubq_u32", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4S" }, "b": { - "register": "Vm.2D" + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - 
"name": "vtrn2q_s8", + "name": "vsubq_u64", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2D" }, "b": { - "register": "Vm.16B" + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_u16", + "name": "vsubq_u8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, "b": { - "register": "Vm.8H" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_u32", + "name": "vsubw_high_s16", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int32x4_t a", + "int16x8_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" }, "b": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ @@ -113628,26 +318441,26 @@ ], "instructions": [ [ - "TRN2" + "SSUBW2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_u64", + "name": "vsubw_high_s32", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "int64x2_t a", + "int32x4_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2D" }, "b": { - "register": "Vm.2D" + "register": "Vm.4S" } }, "Architectures": [ @@ -113655,23 +318468,23 @@ ], "instructions": [ [ - "TRN2" + "SSUBW2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_u8", + "name": "vsubw_high_s8", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "int16x8_t a", + "int8x16_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int16x8_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, "b": { "register": "Vm.16B" @@ -113682,143 +318495,104 @@ ], "instructions": [ [ - "TRN2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vtrn_f16", - "arguments": [ - "float16x4_t a", - "float16x4_t b" - ], - "return_type": { - "value": "float16x4x2_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "TRN1", - "TRN2" + "SSUBW2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn_f32", + "name": "vsubw_high_u16", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "uint32x4_t a", + "uint16x8_t b" ], "return_type": { - "value": "float32x2x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4S" }, "b": { - "register": "Vm.2S" + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TRN1", - "TRN2" + "USUBW2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn_p16", + "name": "vsubw_high_u32", "arguments": [ - "poly16x4_t a", - "poly16x4_t b" + "uint64x2_t a", + "uint32x4_t b" ], "return_type": { - "value": "poly16x4x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.2D" }, "b": { - "register": "Vm.4H" + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TRN1", - "TRN2" + "USUBW2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn_p8", + "name": "vsubw_high_u8", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "uint16x8_t a", + "uint8x16_t b" ], "return_type": { - "value": "poly8x8x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, "b": { - "register": "Vm.8B" + "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TRN1", - "TRN2" + "USUBW2" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vtrn_s16", + "name": "vsubw_s16", "arguments": [ - "int16x4_t a", + "int32x4_t a", "int16x4_t b" ], "return_type": { - "value": "int16x4x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" }, "b": { "register": "Vm.4H" @@ -113831,24 +318605,23 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "SSUBW" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn_s32", + "name": "vsubw_s32", "arguments": [ - "int32x2_t a", + "int64x2_t a", "int32x2_t b" ], "return_type": { - "value": "int32x2x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.2D" }, "b": { "register": "Vm.2S" @@ -113861,24 +318634,23 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "SSUBW" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn_s8", + "name": "vsubw_s8", "arguments": [ - "int8x8_t a", + "int16x8_t a", "int8x8_t b" ], "return_type": { - "value": "int8x8x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, "b": { "register": "Vm.8B" @@ -113891,24 +318663,23 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "SSUBW" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn_u16", + "name": "vsubw_u16", "arguments": [ - "uint16x4_t a", + "uint32x4_t a", "uint16x4_t b" ], "return_type": { - "value": "uint16x4x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" }, "b": { "register": "Vm.4H" @@ -113921,24 +318692,23 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "USUBW" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn_u32", + "name": "vsubw_u32", "arguments": [ - "uint32x2_t a", + "uint64x2_t a", "uint32x2_t b" ], "return_type": { - "value": "uint32x2x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.2D" }, "b": { "register": "Vm.2S" @@ -113951,24 +318721,23 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "USUBW" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn_u8", + 
"name": "vsubw_u8", "arguments": [ - "uint8x8_t a", + "uint16x8_t a", "uint8x8_t b" ], "return_type": { - "value": "uint8x8x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, "b": { "register": "Vm.8B" @@ -113981,147 +318750,171 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "USUBW" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_f16", + "name": "vsudot_lane_s32", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "int32x2_t r", + "int8x8_t a", + "uint8x8_t b", + "const int lane" ], "return_type": { - "value": "float16x8x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm.8H" + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "TRN1", - "TRN2" + "SUDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_f32", + "name": "vsudot_laneq_s32", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "int32x2_t r", + "int8x8_t a", + "uint8x16_t b", + "const int lane" ], "return_type": { - "value": "float32x4x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8B" }, "b": { - "register": "Vm.4S" + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TRN1", - "TRN2" + "SUDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_p16", + "name": "vsudotq_lane_s32", "arguments": [ - "poly16x8_t a", - "poly16x8_t b" + "int32x4_t r", + "int8x16_t a", + "uint8x8_t b", + "const int lane" ], "return_type": { - "value": "poly16x8x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm.8H" + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + 
"register": "Vd.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TRN1", - "TRN2" + "SUDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_p8", + "name": "vsudotq_laneq_s32", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "int32x4_t r", + "int8x16_t a", + "uint8x16_t b", + "const int lane" ], "return_type": { - "value": "poly8x16x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8B" }, "b": { - "register": "Vm.16B" + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TRN1", - "TRN2" + "SUDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_s16", + "name": "vtbl1_p8", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "poly8x8_t a", + "uint8x8_t idx" ], "return_type": { - "value": "int16x8x2_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "Zeros(64):a": { + "register": "Vn.16B" }, - "b": { - "register": "Vm.8H" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114131,27 +318924,26 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_s32", + "name": "vtbl1_s8", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int8x8_t a", + "int8x8_t idx" ], "return_type": { - "value": "int32x4x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "Zeros(64):a": { + "register": "Vn.16B" }, - "b": { - "register": "Vm.4S" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114161,27 +318953,26 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_s8", + "name": "vtbl1_u8", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "uint8x8_t a", + "uint8x8_t idx" ], "return_type": { - "value": "int8x16x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { + "Zeros(64):a": { "register": 
"Vn.16B" }, - "b": { - "register": "Vm.16B" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114191,27 +318982,26 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_u16", + "name": "vtbl2_p8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "poly8x8x2_t a", + "uint8x8_t idx" ], "return_type": { - "value": "uint16x8x2_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "a.val[1]:a.val[0]": { + "register": "Vn.16B" }, - "b": { - "register": "Vm.8H" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114221,27 +319011,26 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_u32", + "name": "vtbl2_s8", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int8x8x2_t a", + "int8x8_t idx" ], "return_type": { - "value": "uint32x4x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "a.val[1]:a.val[0]": { + "register": "Vn.16B" }, - "b": { - "register": "Vm.4S" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114251,27 +319040,26 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_u8", + "name": "vtbl2_u8", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "uint8x8x2_t a", + "uint8x8_t idx" ], "return_type": { - "value": "uint8x16x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { + "a.val[1]:a.val[0]": { "register": "Vn.16B" }, - "b": { - "register": "Vm.16B" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114281,54 +319069,60 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_p64", + "name": "vtbl3_p8", "arguments": [ - "poly64x1_t a", - "poly64x1_t b" + "poly8x8x3_t a", + "uint8x8_t idx" ], "return_type": { - "value": "uint64x1_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "Zeros(64):a.val[2]": { + 
"register": "Vn+1.16B" }, - "b": { - "register": "Dm" + "a.val[1]:a.val[0]": { + "register": "Vn.16B" + }, + "idx": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "CMTST" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_p8", + "name": "vtbl3_s8", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "int8x8x3_t a", + "int8x8_t idx" ], "return_type": { - "value": "uint8x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "Zeros(64):a.val[2]": { + "register": "Vn+1.16B" }, - "b": { + "a.val[1]:a.val[0]": { + "register": "Vn.16B" + }, + "idx": { "register": "Vm.8B" } }, @@ -114339,26 +319133,29 @@ ], "instructions": [ [ - "CMTST" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_s16", + "name": "vtbl3_u8", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "uint8x8x3_t a", + "uint8x8_t idx" ], "return_type": { - "value": "uint16x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "Zeros(64):a.val[2]": { + "register": "Vn+1.16B" }, - "b": { - "register": "Vm.4H" + "a.val[1]:a.val[0]": { + "register": "Vn.16B" + }, + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114368,26 +319165,29 @@ ], "instructions": [ [ - "CMTST" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_s32", + "name": "vtbl4_p8", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "poly8x8x4_t a", + "uint8x8_t idx" ], "return_type": { - "value": "uint32x2_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "a.val[1]:a.val[0]": { + "register": "Vn.16B" }, - "b": { - "register": "Vm.2S" + "a.val[3]:a.val[2]": { + "register": "Vn+1.16B" + }, + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114397,52 +319197,60 @@ ], "instructions": [ [ - "CMTST" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_s64", + "name": "vtbl4_s8", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "int8x8x4_t a", + "int8x8_t idx" ], 
"return_type": { - "value": "uint64x1_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "a.val[1]:a.val[0]": { + "register": "Vn.16B" }, - "b": { - "register": "Dm" + "a.val[3]:a.val[2]": { + "register": "Vn+1.16B" + }, + "idx": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "CMTST" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_s8", + "name": "vtbl4_u8", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint8x8x4_t a", + "uint8x8_t idx" ], "return_type": { "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "a.val[1]:a.val[0]": { + "register": "Vn.16B" }, - "b": { + "a.val[3]:a.val[2]": { + "register": "Vn+1.16B" + }, + "idx": { "register": "Vm.8B" } }, @@ -114453,26 +319261,30 @@ ], "instructions": [ [ - "CMTST" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_u16", + "name": "vtbx1_p8", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "poly8x8_t a", + "poly8x8_t b", + "uint8x8_t idx" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { + "Zeros(64):b": { + "register": "Vn.16B" + }, "a": { - "register": "Vn.4H" + "register": "Vd.8B" }, - "b": { - "register": "Vm.4H" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114482,26 +319294,33 @@ ], "instructions": [ [ - "CMTST" + "MOVI", + "CMHS", + "TBL", + "BIF" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_u32", + "name": "vtbx1_s8", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "int8x8_t a", + "int8x8_t b", + "int8x8_t idx" ], "return_type": { - "value": "uint32x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { + "Zeros(64):b": { + "register": "Vn.16B" + }, "a": { - "register": "Vn.2S" + "register": "Vd.8B" }, - "b": { - "register": "Vm.2S" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114511,52 +319330,68 @@ ], "instructions": [ [ - "CMTST" + "MOVI", + "CMHS", + "TBL", + "BIF" ] ] }, { "SIMD_ISA": "Neon", - 
"name": "vtst_u64", + "name": "vtbx1_u8", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "uint8x8_t a", + "uint8x8_t b", + "uint8x8_t idx" ], "return_type": { - "value": "uint64x1_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { + "Zeros(64):b": { + "register": "Vn.16B" + }, "a": { - "register": "Dn" + "register": "Vd.8B" }, - "b": { - "register": "Dm" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "CMTST" + "MOVI", + "CMHS", + "TBL", + "BIF" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_u8", + "name": "vtbx2_p8", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "poly8x8_t a", + "poly8x8x2_t b", + "uint8x8_t idx" ], "return_type": { - "value": "uint8x8_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vd.8B" }, - "b": { + "b.val[1]:b.val[0]": { + "register": "Vn.16B" + }, + "idx": { "register": "Vm.8B" } }, @@ -114567,108 +319402,138 @@ ], "instructions": [ [ - "CMTST" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstd_s64", + "name": "vtbx2_s8", "arguments": [ - "int64_t a", - "int64_t b" + "int8x8_t a", + "int8x8x2_t b", + "int8x8_t idx" ], "return_type": { - "value": "uint64_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.8B" }, - "b": { - "register": "Dm" + "b.val[1]:b.val[0]": { + "register": "Vn.16B" + }, + "idx": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "CMTST" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstd_u64", + "name": "vtbx2_u8", "arguments": [ - "uint64_t a", - "uint64_t b" + "uint8x8_t a", + "uint8x8x2_t b", + "uint8x8_t idx" ], "return_type": { - "value": "uint64_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.8B" }, - "b": { - "register": "Dm" + "b.val[1]:b.val[0]": { + "register": "Vn.16B" + }, + "idx": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + 
"A32", "A64" ], "instructions": [ [ - "CMTST" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_p64", + "name": "vtbx3_p8", "arguments": [ - "poly64x2_t a", - "poly64x2_t b" + "poly8x8_t a", + "poly8x8x3_t b", + "uint8x8_t idx" ], "return_type": { - "value": "uint64x2_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { + "Zeros(64):b.val[2]": { + "register": "Vn+1.16B" + }, "a": { - "register": "Vn.2D" + "register": "Vd.8B" }, - "b": { - "register": "Vm.2D" + "b.val[1]:b.val[0]": { + "register": "Vn.16B" + }, + "idx": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "CMTST" + "MOVI", + "CMHS", + "TBL", + "BIF" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_p8", + "name": "vtbx3_s8", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "int8x8_t a", + "int8x8x3_t b", + "int8x8_t idx" ], "return_type": { - "value": "uint8x16_t" + "value": "int8x8_t" }, "Arguments_Preparation": { + "Zeros(64):b.val[2]": { + "register": "Vn+1.16B" + }, "a": { + "register": "Vd.8B" + }, + "b.val[1]:b.val[0]": { "register": "Vn.16B" }, - "b": { - "register": "Vm.16B" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114678,26 +319543,36 @@ ], "instructions": [ [ - "CMTST" + "MOVI", + "CMHS", + "TBL", + "BIF" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_s16", + "name": "vtbx3_u8", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "uint8x8_t a", + "uint8x8x3_t b", + "uint8x8_t idx" ], "return_type": { - "value": "uint16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { + "Zeros(64):b.val[2]": { + "register": "Vn+1.16B" + }, "a": { - "register": "Vn.8H" + "register": "Vd.8B" }, - "b": { - "register": "Vm.8H" + "b.val[1]:b.val[0]": { + "register": "Vn.16B" + }, + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114707,26 +319582,36 @@ ], "instructions": [ [ - "CMTST" + "MOVI", + "CMHS", + "TBL", + "BIF" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_s32", + "name": "vtbx4_p8", "arguments": [ - "int32x4_t a", - 
"int32x4_t b" + "poly8x8_t a", + "poly8x8x4_t b", + "uint8x8_t idx" ], "return_type": { - "value": "uint32x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.8B" }, - "b": { - "register": "Vm.4S" + "b.val[1]:b.val[0]": { + "register": "Vn.16B" + }, + "b.val[3]:b.val[2]": { + "register": "Vn+1.16B" + }, + "c": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114736,53 +319621,69 @@ ], "instructions": [ [ - "CMTST" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_s64", + "name": "vtbx4_s8", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "int8x8_t a", + "int8x8x4_t b", + "int8x8_t idx" ], "return_type": { - "value": "uint64x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.8B" }, - "b": { - "register": "Vm.2D" + "b.val[1]:b.val[0]": { + "register": "Vn.16B" + }, + "b.val[3]:b.val[2]": { + "register": "Vn+1.16B" + }, + "c": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "CMTST" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_s8", + "name": "vtbx4_u8", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "uint8x8_t a", + "uint8x8x4_t b", + "uint8x8_t idx" ], "return_type": { - "value": "uint8x16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.8B" + }, + "b.val[1]:b.val[0]": { "register": "Vn.16B" }, - "b": { - "register": "Vm.16B" + "b.val[3]:b.val[2]": { + "register": "Vn+1.16B" + }, + "c": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114792,84 +319693,80 @@ ], "instructions": [ [ - "CMTST" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_u16", + "name": "vtrn1_f16", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, "b": { - "register": "Vm.8H" + "register": "Vm.4H" } }, 
"Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMTST" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_u32", + "name": "vtrn1_f32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" }, "b": { - "register": "Vm.4S" + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMTST" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_u64", + "name": "vtrn1_p16", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "poly16x4_t a", + "poly16x4_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4H" }, "b": { - "register": "Vm.2D" + "register": "Vm.4H" } }, "Architectures": [ @@ -114877,55 +319774,53 @@ ], "instructions": [ [ - "CMTST" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_u8", + "name": "vtrn1_p8", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "poly8x8_t a", + "poly8x8_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8B" }, "b": { - "register": "Vm.16B" + "register": "Vm.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMTST" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqadd_s16", + "name": "vtrn1_s16", "arguments": [ "int16x4_t a", - "uint16x4_t b" + "int16x4_t b" ], "return_type": { "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.4H" }, "b": { - "register": "Vn.4H" + "register": "Vm.4H" } }, "Architectures": [ @@ -114933,26 +319828,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqadd_s32", + "name": "vtrn1_s32", "arguments": [ "int32x2_t a", - "uint32x2_t b" + "int32x2_t b" ], 
"return_type": { "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.2S" }, "b": { - "register": "Vn.2S" + "register": "Vm.2S" } }, "Architectures": [ @@ -114960,26 +319855,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqadd_s64", + "name": "vtrn1_s8", "arguments": [ - "int64x1_t a", - "uint64x1_t b" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int64x1_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.8B" }, "b": { - "register": "Dn" + "register": "Vm.8B" } }, "Architectures": [ @@ -114987,26 +319882,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqadd_s8", + "name": "vtrn1_u16", "arguments": [ - "int8x8_t a", - "uint8x8_t b" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "int8x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.4H" }, "b": { - "register": "Vn.8B" + "register": "Vm.4H" } }, "Architectures": [ @@ -115014,26 +319909,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqaddb_s8", + "name": "vtrn1_u32", "arguments": [ - "int8_t a", - "uint8_t b" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "int8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Bd" + "register": "Vn.2S" }, "b": { - "register": "Bn" + "register": "Vm.2S" } }, "Architectures": [ @@ -115041,26 +319936,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqaddd_s64", + "name": "vtrn1_u8", "arguments": [ - "int64_t a", - "uint64_t b" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "int64_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.8B" }, "b": { - "register": "Dn" + "register": "Vm.8B" } }, "Architectures": [ @@ -115068,26 
+319963,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqaddh_s16", + "name": "vtrn1q_f16", "arguments": [ - "int16_t a", - "uint16_t b" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "int16_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hd" + "register": "Vn.8H" }, "b": { - "register": "Hn" + "register": "Vm.8H" } }, "Architectures": [ @@ -115095,26 +319990,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqaddq_s16", + "name": "vtrn1q_f32", "arguments": [ - "int16x8_t a", - "uint16x8_t b" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vn.4S" }, "b": { - "register": "Vn.8H" + "register": "Vm.4S" } }, "Architectures": [ @@ -115122,26 +320017,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqaddq_s32", + "name": "vtrn1q_f64", "arguments": [ - "int32x4_t a", - "uint32x4_t b" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.2D" }, "b": { - "register": "Vn.4S" + "register": "Vm.2D" } }, "Architectures": [ @@ -115149,26 +320044,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqaddq_s64", + "name": "vtrn1q_p16", "arguments": [ - "int64x2_t a", - "uint64x2_t b" + "poly16x8_t a", + "poly16x8_t b" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8H" }, "b": { - "register": "Vn.2D" + "register": "Vm.8H" } }, "Architectures": [ @@ -115176,26 +320071,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqaddq_s8", + "name": "vtrn1q_p64", "arguments": [ - "int8x16_t a", 
- "uint8x16_t b" + "poly64x2_t a", + "poly64x2_t b" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vn.2D" }, "b": { - "register": "Vn.16B" + "register": "Vm.2D" } }, "Architectures": [ @@ -115203,26 +320098,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqadds_s32", + "name": "vtrn1q_p8", "arguments": [ - "int32_t a", - "uint32_t b" + "poly8x16_t a", + "poly8x16_t b" ], "return_type": { - "value": "int32_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.16B" }, "b": { - "register": "Sn" + "register": "Vm.16B" } }, "Architectures": [ @@ -115230,72 +320125,53 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vusdot_lane_s32", + "name": "vtrn1q_s16", "arguments": [ - "int32x2_t r", - "uint8x8_t a", - "int8x8_t b", - "const int lane" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, "b": { - "register": "Vm.4B" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.2S" + "register": "Vm.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "USDOT" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vusdot_laneq_s32", + "name": "vtrn1q_s32", "arguments": [ - "int32x2_t r", - "uint8x8_t a", - "int8x16_t b", - "const int lane" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" }, "b": { - "register": "Vm.4B" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.2S" + "register": "Vm.4S" } }, "Architectures": [ @@ -115303,104 +320179,80 @@ ], "instructions": [ [ - "USDOT" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vusdot_s32", + 
"name": "vtrn1q_s64", "arguments": [ - "int32x2_t r", - "uint8x8_t a", - "int8x8_t b" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2D" }, "b": { - "register": "Vm.8B" - }, - "r": { - "register": "Vd.2S" + "register": "Vm.2D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "USDOT" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vusdotq_lane_s32", + "name": "vtrn1q_s8", "arguments": [ - "int32x4_t r", - "uint8x16_t a", - "int8x8_t b", - "const int lane" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { "register": "Vn.16B" }, "b": { - "register": "Vm.4B" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.4S" + "register": "Vm.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "USDOT" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vusdotq_laneq_s32", + "name": "vtrn1q_u16", "arguments": [ - "int32x4_t r", - "uint8x16_t a", - "int8x16_t b", - "const int lane" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, "b": { - "register": "Vm.4B" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.4S" + "register": "Vm.8H" } }, "Architectures": [ @@ -115408,30 +320260,53 @@ ], "instructions": [ [ - "USDOT" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vusdotq_s32", + "name": "vtrn1q_u32", "arguments": [ - "int32x4_t r", - "uint8x16_t a", - "int8x16_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, "b": { - "register": "Vm.16B" + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vtrn1q_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" }, - "r": { - "register": "Vd.4S" + "b": { + "register": "Vm.2D" } }, "Architectures": [ @@ -115439,20 +320314,19 @@ ], "instructions": [ [ - "USDOT" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vusmmlaq_s32", + "name": "vtrn1q_u8", "arguments": [ - "int32x4_t r", "uint8x16_t a", - "int8x16_t b" + "uint8x16_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { @@ -115460,24 +320334,20 @@ }, "b": { "register": "Vm.16B" - }, - "r": { - "register": "Vd.4S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "USMMLA" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_f16", + "name": "vtrn2_f16", "arguments": [ "float16x4_t a", "float16x4_t b" @@ -115498,13 +320368,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_f32", + "name": "vtrn2_f32", "arguments": [ "float32x2_t a", "float32x2_t b" @@ -115525,13 +320395,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_p16", + "name": "vtrn2_p16", "arguments": [ "poly16x4_t a", "poly16x4_t b" @@ -115552,13 +320422,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_p8", + "name": "vtrn2_p8", "arguments": [ "poly8x8_t a", "poly8x8_t b" @@ -115579,13 +320449,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_s16", + "name": "vtrn2_s16", "arguments": [ "int16x4_t a", "int16x4_t b" @@ -115606,13 +320476,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_s32", + "name": "vtrn2_s32", "arguments": [ "int32x2_t a", "int32x2_t b" @@ -115633,13 +320503,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] 
}, { "SIMD_ISA": "Neon", - "name": "vuzp1_s8", + "name": "vtrn2_s8", "arguments": [ "int8x8_t a", "int8x8_t b" @@ -115660,13 +320530,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_u16", + "name": "vtrn2_u16", "arguments": [ "uint16x4_t a", "uint16x4_t b" @@ -115687,13 +320557,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_u32", + "name": "vtrn2_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b" @@ -115714,13 +320584,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_u8", + "name": "vtrn2_u8", "arguments": [ "uint8x8_t a", "uint8x8_t b" @@ -115741,13 +320611,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_f16", + "name": "vtrn2q_f16", "arguments": [ "float16x8_t a", "float16x8_t b" @@ -115768,13 +320638,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_f32", + "name": "vtrn2q_f32", "arguments": [ "float32x4_t a", "float32x4_t b" @@ -115795,13 +320665,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_f64", + "name": "vtrn2q_f64", "arguments": [ "float64x2_t a", "float64x2_t b" @@ -115822,13 +320692,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_p16", + "name": "vtrn2q_p16", "arguments": [ "poly16x8_t a", "poly16x8_t b" @@ -115849,13 +320719,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_p64", + "name": "vtrn2q_p64", "arguments": [ "poly64x2_t a", "poly64x2_t b" @@ -115876,13 +320746,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_p8", + "name": "vtrn2q_p8", "arguments": [ "poly8x16_t a", "poly8x16_t b" @@ -115903,13 +320773,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_s16", + "name": "vtrn2q_s16", "arguments": [ 
"int16x8_t a", "int16x8_t b" @@ -115930,13 +320800,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_s32", + "name": "vtrn2q_s32", "arguments": [ "int32x4_t a", "int32x4_t b" @@ -115957,13 +320827,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_s64", + "name": "vtrn2q_s64", "arguments": [ "int64x2_t a", "int64x2_t b" @@ -115984,13 +320854,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_s8", + "name": "vtrn2q_s8", "arguments": [ "int8x16_t a", "int8x16_t b" @@ -116011,13 +320881,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_u16", + "name": "vtrn2q_u16", "arguments": [ "uint16x8_t a", "uint16x8_t b" @@ -116038,13 +320908,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_u32", + "name": "vtrn2q_u32", "arguments": [ "uint32x4_t a", "uint32x4_t b" @@ -116065,13 +320935,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_u64", + "name": "vtrn2q_u64", "arguments": [ "uint64x2_t a", "uint64x2_t b" @@ -116092,13 +320962,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_u8", + "name": "vtrn2q_u8", "arguments": [ "uint8x16_t a", "uint8x16_t b" @@ -116119,19 +320989,19 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_f16", + "name": "vtrn_f16", "arguments": [ "float16x4_t a", "float16x4_t b" ], "return_type": { - "value": "float16x4_t" + "value": "float16x4x2_t" }, "Arguments_Preparation": { "a": { @@ -116142,23 +321012,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_f32", + "name": "vtrn_f32", "arguments": [ "float32x2_t a", "float32x2_t b" ], "return_type": { - "value": "float32x2_t" + "value": "float32x2x2_t" }, 
"Arguments_Preparation": { "a": { @@ -116169,23 +321042,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_p16", + "name": "vtrn_p16", "arguments": [ "poly16x4_t a", "poly16x4_t b" ], "return_type": { - "value": "poly16x4_t" + "value": "poly16x4x2_t" }, "Arguments_Preparation": { "a": { @@ -116196,23 +321072,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_p8", + "name": "vtrn_p8", "arguments": [ "poly8x8_t a", "poly8x8_t b" ], "return_type": { - "value": "poly8x8_t" + "value": "poly8x8x2_t" }, "Arguments_Preparation": { "a": { @@ -116223,23 +321102,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_s16", + "name": "vtrn_s16", "arguments": [ "int16x4_t a", "int16x4_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int16x4x2_t" }, "Arguments_Preparation": { "a": { @@ -116250,23 +321132,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_s32", + "name": "vtrn_s32", "arguments": [ "int32x2_t a", "int32x2_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int32x2x2_t" }, "Arguments_Preparation": { "a": { @@ -116277,23 +321162,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_s8", + "name": "vtrn_s8", "arguments": [ "int8x8_t a", "int8x8_t b" ], "return_type": { - "value": "int8x8_t" + "value": "int8x8x2_t" }, "Arguments_Preparation": { "a": { @@ -116304,23 +321192,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_u16", + "name": "vtrn_u16", "arguments": [ 
"uint16x4_t a", "uint16x4_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "uint16x4x2_t" }, "Arguments_Preparation": { "a": { @@ -116331,23 +321222,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_u32", + "name": "vtrn_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "uint32x2x2_t" }, "Arguments_Preparation": { "a": { @@ -116358,23 +321252,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_u8", + "name": "vtrn_u8", "arguments": [ "uint8x8_t a", "uint8x8_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "uint8x8x2_t" }, "Arguments_Preparation": { "a": { @@ -116385,23 +321282,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_f16", + "name": "vtrnq_f16", "arguments": [ "float16x8_t a", "float16x8_t b" ], "return_type": { - "value": "float16x8_t" + "value": "float16x8x2_t" }, "Arguments_Preparation": { "a": { @@ -116412,23 +321312,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_f32", + "name": "vtrnq_f32", "arguments": [ "float32x4_t a", "float32x4_t b" ], "return_type": { - "value": "float32x4_t" + "value": "float32x4x2_t" }, "Arguments_Preparation": { "a": { @@ -116439,50 +321342,86 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_f64", + "name": "vtrnq_p16", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "poly16x8_t a", + "poly16x8_t b" ], "return_type": { - "value": "float64x2_t" + "value": "poly16x8x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.8H" }, 
"b": { - "register": "Vm.2D" + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_p16", + "name": "vtrnq_p8", "arguments": [ - "poly16x8_t a", - "poly16x8_t b" + "poly8x16_t a", + "poly8x16_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "poly8x16x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "TRN1", + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vtrnq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int16x8x2_t" }, "Arguments_Preparation": { "a": { @@ -116493,50 +321432,56 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_p64", + "name": "vtrnq_s32", "arguments": [ - "poly64x2_t a", - "poly64x2_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "poly64x2_t" + "value": "int32x4x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4S" }, "b": { - "register": "Vm.2D" + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_p8", + "name": "vtrnq_s8", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "int8x16x2_t" }, "Arguments_Preparation": { "a": { @@ -116547,23 +321492,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_s16", + "name": "vtrnq_u16", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint16x8x2_t" }, 
"Arguments_Preparation": { "a": { @@ -116574,23 +321522,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_s32", + "name": "vtrnq_u32", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4x2_t" }, "Arguments_Preparation": { "a": { @@ -116601,165 +321552,178 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_s64", + "name": "vtrnq_u8", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint8x16x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.16B" }, "b": { - "register": "Vm.2D" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_s8", + "name": "vtst_p64", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "poly64x1_t a", + "poly64x1_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Dn" }, "b": { - "register": "Vm.16B" + "register": "Dm" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_u16", + "name": "vtst_p8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "poly8x8_t a", + "poly8x8_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm.8H" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_u32", + "name": "vtst_s16", "arguments": [ - 
"uint32x4_t a", - "uint32x4_t b" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.4H" }, "b": { - "register": "Vm.4S" + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_u64", + "name": "vtst_s32", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.2S" }, "b": { - "register": "Vm.2D" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_u8", + "name": "vtst_s64", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Dn" }, "b": { - "register": "Vm.16B" + "register": "Dm" } }, "Architectures": [ @@ -116767,19 +321731,48 @@ ], "instructions": [ [ - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_f16", + "name": "vtst_s8", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "float16x4x2_t" + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMTST" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vtst_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { @@ -116796,20 +321789,19 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vuzp_f32", + "name": "vtst_u32", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "float32x2x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { @@ -116826,50 +321818,46 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_p16", + "name": "vtst_u64", "arguments": [ - "poly16x4_t a", - "poly16x4_t b" + "uint64x1_t a", + "uint64x1_t b" ], "return_type": { - "value": "poly16x4x2_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Dn" }, "b": { - "register": "Vm.4H" + "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_p8", + "name": "vtst_u8", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "poly8x8x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { @@ -116886,87 +321874,108 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_s16", + "name": "vtstd_s64", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "int64_t a", + "int64_t b" ], "return_type": { - "value": "int16x4x2_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Dn" }, "b": { - "register": "Vm.4H" + "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_s32", + "name": "vtstd_u64", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "uint64_t a", + "uint64_t b" ], "return_type": { - "value": "int32x2x2_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Dn" }, "b": { - "register": "Vm.2S" + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMTST" + ] + ] + }, + { + 
"SIMD_ISA": "Neon", + "name": "vtstq_p64", + "arguments": [ + "poly64x2_t a", + "poly64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_s8", + "name": "vtstq_p8", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "poly8x16_t a", + "poly8x16_t b" ], "return_type": { - "value": "int8x8x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.16B" }, "b": { - "register": "Vm.8B" + "register": "Vm.16B" } }, "Architectures": [ @@ -116976,27 +321985,26 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_u16", + "name": "vtstq_s16", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "uint16x4x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.8H" }, "b": { - "register": "Vm.4H" + "register": "Vm.8H" } }, "Architectures": [ @@ -117006,27 +322014,26 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_u32", + "name": "vtstq_s32", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint32x2x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4S" }, "b": { - "register": "Vm.2S" + "register": "Vm.4S" } }, "Architectures": [ @@ -117036,27 +322043,53 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_u8", + "name": "vtstq_s64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "uint8x8x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - 
"register": "Vn.8B" + "register": "Vn.2D" }, "b": { - "register": "Vm.8B" + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMTST" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vtstq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" } }, "Architectures": [ @@ -117066,20 +322099,19 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_f16", + "name": "vtstq_u16", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "float16x8x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { @@ -117096,20 +322128,19 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_f32", + "name": "vtstq_u32", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "float32x4x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { @@ -117126,50 +322157,46 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_p16", + "name": "vtstq_u64", "arguments": [ - "poly16x8_t a", - "poly16x8_t b" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "poly16x8x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2D" }, "b": { - "register": "Vm.8H" + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_p8", + "name": "vtstq_u8", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "poly8x16x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { @@ -117186,170 +322213,522 @@ 
], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_s16", + "name": "vuqadd_s16", + "arguments": [ + "int16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqadd_s32", + "arguments": [ + "int32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqadd_s64", + "arguments": [ + "int64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dd" + }, + "b": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqadd_s8", + "arguments": [ + "int8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqaddb_s8", + "arguments": [ + "int8_t a", + "uint8_t b" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Bd" + }, + "b": { + "register": "Bn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqaddd_s64", + "arguments": [ + "int64_t a", + "uint64_t b" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dd" + }, + "b": { + "register": "Dn" + } + }, 
+ "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqaddh_s16", + "arguments": [ + "int16_t a", + "uint16_t b" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hd" + }, + "b": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqaddq_s16", + "arguments": [ + "int16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqaddq_s32", + "arguments": [ + "int32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqaddq_s64", + "arguments": [ + "int64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqaddq_s8", + "arguments": [ + "int8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqadds_s32", + "arguments": [ + "int32_t a", + "uint32_t b" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sd" + }, + "b": { + 
"register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vusdot_lane_s32", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int32x2_t r", + "uint8x8_t a", + "int8x8_t b", + "const int lane" ], "return_type": { - "value": "int16x8x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm.8H" + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "USDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_s32", + "name": "vusdot_laneq_s32", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int32x2_t r", + "uint8x8_t a", + "int8x16_t b", + "const int lane" ], "return_type": { - "value": "int32x4x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8B" }, "b": { - "register": "Vm.4S" + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "USDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_s8", + "name": "vusdot_s32", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "int32x2_t r", + "uint8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int8x16x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8B" }, "b": { - "register": "Vm.16B" + "register": "Vm.8B" + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "USDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_u16", + "name": "vusdotq_lane_s32", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "int32x4_t r", + "uint8x16_t a", + "int8x8_t b", + "const int lane" ], "return_type": { - "value": 
"uint16x8x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, "b": { - "register": "Vm.8H" + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "USDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_u32", + "name": "vusdotq_laneq_s32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int32x4_t r", + "uint8x16_t a", + "int8x16_t b", + "const int lane" ], "return_type": { - "value": "uint32x4x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.16B" }, "b": { - "register": "Vm.4S" + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "USDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_u8", + "name": "vusdotq_s32", "arguments": [ + "int32x4_t r", "uint8x16_t a", - "uint8x16_t b" + "int8x16_t b" ], "return_type": { - "value": "uint8x16x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -117357,53 +322736,56 @@ }, "b": { "register": "Vm.16B" + }, + "r": { + "register": "Vd.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "USDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vxarq_u64", + "name": "vusmmlaq_s32", "arguments": [ - "uint64x2_t a", - "uint64x2_t b", - "const int imm6" + "int32x4_t r", + "uint8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.16B" }, - "b": {}, - "imm6": { - "minimum": 0, - "maximum": 63 + "b": { + "register": "Vm.16B" + }, + "r": { + "register": "Vd.4S" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "XAR" + "USMMLA" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vzip1_f16", + "name": "vuzp1_f16", "arguments": [ "float16x4_t a", "float16x4_t b" @@ -117424,13 +322806,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_f32", + "name": "vuzp1_f32", "arguments": [ "float32x2_t a", "float32x2_t b" @@ -117451,13 +322833,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_p16", + "name": "vuzp1_p16", "arguments": [ "poly16x4_t a", "poly16x4_t b" @@ -117478,13 +322860,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_p8", + "name": "vuzp1_p8", "arguments": [ "poly8x8_t a", "poly8x8_t b" @@ -117505,13 +322887,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_s16", + "name": "vuzp1_s16", "arguments": [ "int16x4_t a", "int16x4_t b" @@ -117532,13 +322914,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_s32", + "name": "vuzp1_s32", "arguments": [ "int32x2_t a", "int32x2_t b" @@ -117559,13 +322941,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_s8", + "name": "vuzp1_s8", "arguments": [ "int8x8_t a", "int8x8_t b" @@ -117586,13 +322968,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_u16", + "name": "vuzp1_u16", "arguments": [ "uint16x4_t a", "uint16x4_t b" @@ -117613,13 +322995,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_u32", + "name": "vuzp1_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b" @@ -117640,13 +323022,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_u8", + "name": "vuzp1_u8", "arguments": [ "uint8x8_t a", "uint8x8_t b" @@ -117667,13 +323049,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_f16", + "name": "vuzp1q_f16", "arguments": [ "float16x8_t a", "float16x8_t b" @@ -117694,13 +323076,13 @@ 
], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_f32", + "name": "vuzp1q_f32", "arguments": [ "float32x4_t a", "float32x4_t b" @@ -117721,13 +323103,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_f64", + "name": "vuzp1q_f64", "arguments": [ "float64x2_t a", "float64x2_t b" @@ -117748,13 +323130,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_p16", + "name": "vuzp1q_p16", "arguments": [ "poly16x8_t a", "poly16x8_t b" @@ -117775,13 +323157,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_p64", + "name": "vuzp1q_p64", "arguments": [ "poly64x2_t a", "poly64x2_t b" @@ -117802,13 +323184,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_p8", + "name": "vuzp1q_p8", "arguments": [ "poly8x16_t a", "poly8x16_t b" @@ -117829,13 +323211,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_s16", + "name": "vuzp1q_s16", "arguments": [ "int16x8_t a", "int16x8_t b" @@ -117856,13 +323238,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_s32", + "name": "vuzp1q_s32", "arguments": [ "int32x4_t a", "int32x4_t b" @@ -117883,13 +323265,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_s64", + "name": "vuzp1q_s64", "arguments": [ "int64x2_t a", "int64x2_t b" @@ -117910,13 +323292,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_s8", + "name": "vuzp1q_s8", "arguments": [ "int8x16_t a", "int8x16_t b" @@ -117937,13 +323319,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_u16", + "name": "vuzp1q_u16", "arguments": [ "uint16x8_t a", "uint16x8_t b" @@ -117964,13 +323346,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vzip1q_u32", + "name": "vuzp1q_u32", "arguments": [ "uint32x4_t a", "uint32x4_t b" @@ -117991,13 +323373,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_u64", + "name": "vuzp1q_u64", "arguments": [ "uint64x2_t a", "uint64x2_t b" @@ -118018,13 +323400,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_u8", + "name": "vuzp1q_u8", "arguments": [ "uint8x16_t a", "uint8x16_t b" @@ -118045,13 +323427,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_f16", + "name": "vuzp2_f16", "arguments": [ "float16x4_t a", "float16x4_t b" @@ -118072,13 +323454,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_f32", + "name": "vuzp2_f32", "arguments": [ "float32x2_t a", "float32x2_t b" @@ -118099,13 +323481,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_p16", + "name": "vuzp2_p16", "arguments": [ "poly16x4_t a", "poly16x4_t b" @@ -118126,13 +323508,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_p8", + "name": "vuzp2_p8", "arguments": [ "poly8x8_t a", "poly8x8_t b" @@ -118153,13 +323535,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_s16", + "name": "vuzp2_s16", "arguments": [ "int16x4_t a", "int16x4_t b" @@ -118180,13 +323562,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_s32", + "name": "vuzp2_s32", "arguments": [ "int32x2_t a", "int32x2_t b" @@ -118207,13 +323589,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_s8", + "name": "vuzp2_s8", "arguments": [ "int8x8_t a", "int8x8_t b" @@ -118234,13 +323616,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_u16", + "name": "vuzp2_u16", "arguments": [ "uint16x4_t a", "uint16x4_t b" @@ -118261,13 
+323643,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_u32", + "name": "vuzp2_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b" @@ -118288,13 +323670,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_u8", + "name": "vuzp2_u8", "arguments": [ "uint8x8_t a", "uint8x8_t b" @@ -118315,13 +323697,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_f16", + "name": "vuzp2q_f16", "arguments": [ "float16x8_t a", "float16x8_t b" @@ -118342,13 +323724,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_f32", + "name": "vuzp2q_f32", "arguments": [ "float32x4_t a", "float32x4_t b" @@ -118369,13 +323751,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_f64", + "name": "vuzp2q_f64", "arguments": [ "float64x2_t a", "float64x2_t b" @@ -118396,13 +323778,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_p16", + "name": "vuzp2q_p16", "arguments": [ "poly16x8_t a", "poly16x8_t b" @@ -118423,13 +323805,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_p64", + "name": "vuzp2q_p64", "arguments": [ "poly64x2_t a", "poly64x2_t b" @@ -118450,13 +323832,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_p8", + "name": "vuzp2q_p8", "arguments": [ "poly8x16_t a", "poly8x16_t b" @@ -118477,13 +323859,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_s16", + "name": "vuzp2q_s16", "arguments": [ "int16x8_t a", "int16x8_t b" @@ -118504,13 +323886,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_s32", + "name": "vuzp2q_s32", "arguments": [ "int32x4_t a", "int32x4_t b" @@ -118531,13 +323913,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - 
"name": "vzip2q_s64", + "name": "vuzp2q_s64", "arguments": [ "int64x2_t a", "int64x2_t b" @@ -118558,13 +323940,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_s8", + "name": "vuzp2q_s8", "arguments": [ "int8x16_t a", "int8x16_t b" @@ -118585,13 +323967,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_u16", + "name": "vuzp2q_u16", "arguments": [ "uint16x8_t a", "uint16x8_t b" @@ -118612,13 +323994,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_u32", + "name": "vuzp2q_u32", "arguments": [ "uint32x4_t a", "uint32x4_t b" @@ -118639,13 +324021,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_u64", + "name": "vuzp2q_u64", "arguments": [ "uint64x2_t a", "uint64x2_t b" @@ -118666,13 +324048,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_u8", + "name": "vuzp2q_u8", "arguments": [ "uint8x16_t a", "uint8x16_t b" @@ -118693,13 +324075,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_f16", + "name": "vuzp_f16", "arguments": [ "float16x4_t a", "float16x4_t b" @@ -118722,14 +324104,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_f32", + "name": "vuzp_f32", "arguments": [ "float32x2_t a", "float32x2_t b" @@ -118752,14 +324134,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_p16", + "name": "vuzp_p16", "arguments": [ "poly16x4_t a", "poly16x4_t b" @@ -118782,14 +324164,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_p8", + "name": "vuzp_p8", "arguments": [ "poly8x8_t a", "poly8x8_t b" @@ -118812,14 +324194,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_s16", + 
"name": "vuzp_s16", "arguments": [ "int16x4_t a", "int16x4_t b" @@ -118842,14 +324224,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_s32", + "name": "vuzp_s32", "arguments": [ "int32x2_t a", "int32x2_t b" @@ -118872,14 +324254,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_s8", + "name": "vuzp_s8", "arguments": [ "int8x8_t a", "int8x8_t b" @@ -118902,14 +324284,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_u16", + "name": "vuzp_u16", "arguments": [ "uint16x4_t a", "uint16x4_t b" @@ -118932,14 +324314,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_u32", + "name": "vuzp_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b" @@ -118962,14 +324344,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_u8", + "name": "vuzp_u8", "arguments": [ "uint8x8_t a", "uint8x8_t b" @@ -118992,14 +324374,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_f16", + "name": "vuzpq_f16", "arguments": [ "float16x8_t a", "float16x8_t b" @@ -119022,14 +324404,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_f32", + "name": "vuzpq_f32", "arguments": [ "float32x4_t a", "float32x4_t b" @@ -119052,14 +324434,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_p16", + "name": "vuzpq_p16", "arguments": [ "poly16x8_t a", "poly16x8_t b" @@ -119082,14 +324464,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_p8", + "name": "vuzpq_p8", "arguments": [ "poly8x16_t a", "poly8x16_t b" @@ -119112,14 +324494,14 @@ ], "instructions": [ [ - "ZIP1", - 
"ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_s16", + "name": "vuzpq_s16", "arguments": [ "int16x8_t a", "int16x8_t b" @@ -119142,14 +324524,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_s32", + "name": "vuzpq_s32", "arguments": [ "int32x4_t a", "int32x4_t b" @@ -119172,14 +324554,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_s8", + "name": "vuzpq_s8", "arguments": [ "int8x16_t a", "int8x16_t b" @@ -119202,14 +324584,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_u16", + "name": "vuzpq_u16", "arguments": [ "uint16x8_t a", "uint16x8_t b" @@ -119232,14 +324614,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_u32", + "name": "vuzpq_u32", "arguments": [ "uint32x4_t a", "uint32x4_t b" @@ -119262,14 +324644,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_u8", + "name": "vuzpq_u8", "arguments": [ "uint8x16_t a", "uint8x16_t b" @@ -119292,14 +324674,46 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vamin_f16", + "name": "vxarq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b", + "const int imm6" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + "imm6": { + "minimum": 0, + "maximum": 63 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1_f16", "arguments": [ "float16x4_t a", "float16x4_t b" @@ -119320,26 +324734,26 @@ ], "instructions": [ [ - "FAMIN" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vaminq_f16", + "name": "vzip1_f32", "arguments": [ - "float16x8_t a", - 
"float16x8_t b" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "float16x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2S" }, "b": { - "register": "Vm.8H" + "register": "Vm.2S" } }, "Architectures": [ @@ -119347,19 +324761,100 @@ ], "instructions": [ [ - "FAMIN" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vamin_f32", + "name": "vzip1_p16", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "poly16x4_t a", + "poly16x4_t b" ], "return_type": { - "value": "float32x2_t" + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1_p8", + "arguments": [ + "poly8x8_t a", + "poly8x8_t b" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -119374,13 +324869,148 @@ ], "instructions": [ [ - "FAMIN" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vaminq_f32", + "name": "vzip1_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ 
+ "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1q_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1q_f32", "arguments": [ "float32x4_t a", "float32x4_t b" @@ -119401,13 +325031,13 @@ ], "instructions": [ [ - "FAMIN" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vaminq_f64", + "name": "vzip1q_f64", "arguments": [ "float64x2_t a", "float64x2_t b" @@ -119428,26 +325058,26 @@ ], "instructions": [ [ - "FAMIN" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vamax_f16", + "name": "vzip1q_p16", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "poly16x8_t a", + "poly16x8_t b" ], "return_type": { - "value": "float16x4_t" + "value": 
"poly16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.8H" }, "b": { - "register": "Vm.4H" + "register": "Vm.8H" } }, "Architectures": [ @@ -119455,19 +325085,73 @@ ], "instructions": [ [ - "FAMAX" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vamaxq_f16", + "name": "vzip1q_p64", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "poly64x2_t a", + "poly64x2_t b" ], "return_type": { - "value": "float16x8_t" + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1q_p8", + "arguments": [ + "poly8x16_t a", + "poly8x16_t b" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1q_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -119482,26 +325166,26 @@ ], "instructions": [ [ - "FAMAX" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vamax_f32", + "name": "vzip1q_s32", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4S" }, "b": { - "register": "Vm.2S" + "register": "Vm.4S" } }, "Architectures": [ @@ -119509,19 +325193,100 @@ ], "instructions": [ [ - "FAMAX" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vamaxq_f32", + "name": "vzip1q_s64", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "float32x4_t" + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + 
"register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1q_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1q_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1q_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { @@ -119536,26 +325301,107 @@ ], "instructions": [ [ - "FAMAX" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vamaxq_f64", + "name": "vzip1q_u64", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1q_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip2_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip2_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "float64x2_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.2S" }, "b": { - "register": "Vm.2D" + "register": "Vm.2S" } }, "Architectures": [ @@ -119563,34 +325409,26 @@ ], "instructions": [ [ - "FAMAX" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_lane_f16", + "name": "vzip2_p16", "arguments": [ - "float16x4_t a", - "uint8x8_t b", - "const int index" + "poly16x4_t a", + "poly16x4_t b" ], "return_type": { - "value": "float16x8_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.4H" } }, "Architectures": [ @@ -119598,34 +325436,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_lane_s16", + "name": "vzip2_p8", "arguments": [ - "int16x4_t a", - "uint8x8_t b", - "const int index" + "poly8x8_t a", + "poly8x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.8B" } }, "Architectures": [ @@ -119633,34 +325463,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_lane_u16", + "name": "vzip2_s16", "arguments": [ - "uint16x4_t a", - "uint8x8_t b", - "const int index" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { 
- "register": "Vn.8H" + "register": "Vn.4H" }, "b": { - "register": "Vm" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.4H" } }, "Architectures": [ @@ -119668,34 +325490,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_lane_p16", + "name": "vzip2_s32", "arguments": [ - "poly16x4_t a", - "uint8x8_t b", - "const int index" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2S" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.2S" } }, "Architectures": [ @@ -119703,34 +325517,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_laneq_f16", + "name": "vzip2_s8", "arguments": [ - "float16x4_t a", - "uint8x16_t b", - "const int index" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.8B" } }, "Architectures": [ @@ -119738,34 +325544,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_laneq_s16", + "name": "vzip2_u16", "arguments": [ - "int16x4_t a", - "uint8x16_t b", - "const int index" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.4H" } }, "Architectures": [ @@ -119773,34 +325571,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - 
"name": "vluti2_laneq_u16", + "name": "vzip2_u32", "arguments": [ - "uint16x4_t a", - "uint8x16_t b", - "const int index" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2S" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.2S" } }, "Architectures": [ @@ -119808,34 +325598,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_laneq_p16", + "name": "vzip2_u8", "arguments": [ - "poly16x4_t a", - "uint8x16_t b", - "const int index" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.8B" } }, "Architectures": [ @@ -119843,17 +325625,16 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_lane_f16", + "name": "vzip2q_f16", "arguments": [ "float16x8_t a", - "uint8x8_t b", - "const int index" + "float16x8_t b" ], "return_type": { "value": "float16x8_t" @@ -119863,14 +325644,7 @@ "register": "Vn.8H" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.8H" } }, "Architectures": [ @@ -119878,34 +325652,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_lane_s16", + "name": "vzip2q_f32", "arguments": [ - "int16x8_t a", - "uint8x8_t b", - "const int index" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4S" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - 
"maximum": 3 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.4S" } }, "Architectures": [ @@ -119913,34 +325679,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_lane_u16", + "name": "vzip2q_f64", "arguments": [ - "uint16x8_t a", - "uint8x8_t b", - "const int index" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2D" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.2D" } }, "Architectures": [ @@ -119948,17 +325706,16 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_lane_p16", + "name": "vzip2q_p16", "arguments": [ "poly16x8_t a", - "uint8x8_t b", - "const int index" + "poly16x8_t b" ], "return_type": { "value": "poly16x8_t" @@ -119968,14 +325725,7 @@ "register": "Vn.8H" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.8H" } }, "Architectures": [ @@ -119983,34 +325733,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_f16", + "name": "vzip2q_p64", "arguments": [ - "float16x8_t a", - "uint8x16_t b", - "const int index" + "poly64x2_t a", + "poly64x2_t b" ], "return_type": { - "value": "float16x8_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2D" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.2D" } }, "Architectures": [ @@ -120018,34 +325760,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_s16", + "name": "vzip2q_p8", "arguments": [ - "int16x8_t a", - "uint8x16_t b", - "const int index" + "poly8x16_t a", + "poly8x16_t b" ], "return_type": 
{ - "value": "int16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.16B" } }, "Architectures": [ @@ -120053,34 +325787,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_u16", + "name": "vzip2q_s16", "arguments": [ - "uint16x8_t a", - "uint8x16_t b", - "const int index" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.8H" } }, "Architectures": [ @@ -120088,34 +325814,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_lane_u8", + "name": "vzip2q_s32", "arguments": [ - "uint8x8_t a", - "uint8x8_t b", - "const int lane" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.4S" } }, "Architectures": [ @@ -120123,34 +325841,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_lane_u8", + "name": "vzip2q_s64", "arguments": [ - "uint8x16_t a", - "uint8x8_t b", - "const int lane" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2D" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.2D" } }, "Architectures": [ @@ -120158,17 +325868,16 @@ ], 
"instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_lane_s8", + "name": "vzip2q_s8", "arguments": [ - "int8x8_t a", - "uint8x8_t b", - "const int lane" + "int8x16_t a", + "int8x16_t b" ], "return_type": { "value": "int8x16_t" @@ -120178,14 +325887,7 @@ "register": "Vn.16B" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.16B" } }, "Architectures": [ @@ -120193,34 +325895,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_lane_s8", + "name": "vzip2q_u16", "arguments": [ - "int8x16_t a", - "uint8x8_t b", - "const int lane" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.8H" } }, "Architectures": [ @@ -120228,34 +325922,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_lane_p8", + "name": "vzip2q_u32", "arguments": [ - "poly8x8_t a", - "uint8x8_t b", - "const int lane" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.4S" } }, "Architectures": [ @@ -120263,34 +325949,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_lane_p8", + "name": "vzip2q_u64", "arguments": [ - "poly8x16_t a", - "uint8x8_t b", - "const int lane" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": 
"Vn.2D" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.2D" } }, "Architectures": [ @@ -120298,17 +325976,16 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_laneq_u8", + "name": "vzip2q_u8", "arguments": [ - "uint8x8_t a", - "uint8x16_t b", - "const int index" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { "value": "uint8x16_t" @@ -120318,14 +325995,7 @@ "register": "Vn.16B" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.16B" } }, "Architectures": [ @@ -120333,707 +326003,607 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_u8", + "name": "vzip_f16", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", - "const int index" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "float16x4x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4H" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI2" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_laneq_s8", + "name": "vzip_f32", "arguments": [ - "int8x8_t a", - "uint8x16_t b", - "const int index" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int8x16_t" + "value": "float32x2x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2S" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI2" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_s8", + "name": "vzip_p16", "arguments": [ - 
"int8x16_t a", - "uint8x16_t b", - "const int index" + "poly16x4_t a", + "poly16x4_t b" ], "return_type": { - "value": "int8x16_t" + "value": "poly16x4x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4H" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI2" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_laneq_p8", + "name": "vzip_p8", "arguments": [ "poly8x8_t a", - "uint8x16_t b", - "const int index" + "poly8x8_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "poly8x8x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8B" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI2" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_p8", + "name": "vzip_s16", "arguments": [ - "poly8x16_t a", - "uint8x16_t b", - "const int index" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "int16x4x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4H" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI2" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_p16", + "name": "vzip_s32", "arguments": [ - "poly16x8_t a", - "uint8x16_t b", - "const int index" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "int32x2x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2S" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - 
"maximum": 7 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI2" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_lane_u8", + "name": "vzip_s8", "arguments": [ - "uint8x16_t vn", - "uint8x8_t vm", - "const int index" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int8x8x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 0 + "a": { + "register": "Vn.8B" }, - "r": { - "register": "Vd.16B" + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_u8", + "name": "vzip_u16", "arguments": [ - "uint8x16_t vn", - "uint8x16_t vm", - "const int index" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "uint16x4x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vn.4H" }, - "r": { - "register": "Vd.16B" + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_lane_s8", + "name": "vzip_u32", "arguments": [ - "int8x16_t vn", - "uint8x8_t vm", - "const int index" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint32x2x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 0 + "a": { + "register": "Vn.2S" }, - "r": { - "register": "Vd.16B" + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vluti4q_laneq_s8", + "name": "vzip_u8", "arguments": [ - "int8x16_t vn", - "uint8x16_t vm", - "const int index" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint8x8x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vn.8B" }, - "r": { - "register": "Vd.16B" + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_lane_p8", + "name": "vzipq_f16", "arguments": [ - "poly8x16_t vn", - "uint8x8_t vm", - "const int index" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "float16x8x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 0 + "a": { + "register": "Vn.8H" }, - "r": { - "register": "Vd.16B" + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_p8", + "name": "vzipq_f32", "arguments": [ - "poly8x16_t vn", - "uint8x16_t vm", - "const int index" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vn.4S" }, - "r": { - "register": "Vd.16B" + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_lane_u16_x2", + "name": "vzipq_p16", "arguments": [ - "uint16x8x2_t vn", - "uint8x8_t vm", - "const int index" + "poly16x8_t a", + "poly16x8_t b" ], "return_type": { - "value": 
"uint16x8_t" + "value": "poly16x8x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vn.8H" }, - "r": { - "register": "Vd.8H" + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_u16_x2", + "name": "vzipq_p8", "arguments": [ - "uint16x8x2_t vn", - "uint8x16_t vm", - "const int index" + "poly8x16_t a", + "poly8x16_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 + "a": { + "register": "Vn.16B" }, - "r": { - "register": "Vd.8H" + "b": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_lane_s16_x2", + "name": "vzipq_s16", "arguments": [ - "int16x8x2_t vn", - "uint8x8_t vm", - "const int index" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int16x8x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vn.8H" }, - "r": { - "register": "Vd.8H" + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_s16_x2", + "name": "vzipq_s32", "arguments": [ - "int16x8x2_t vn", - "uint8x16_t vm", - "const int index" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 + 
"a": { + "register": "Vn.4S" }, - "r": { - "register": "Vd.8H" + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_lane_f16_x2", + "name": "vzipq_s8", "arguments": [ - "float16x8x2_t vn", - "uint8x8_t vm", - "const int index" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "float16x8_t" + "value": "int8x16x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vn.16B" }, - "r": { - "register": "Vd.8H" + "b": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_f16_x2", + "name": "vzipq_u16", "arguments": [ - "float16x8x2_t vn", - "uint8x16_t vm", - "const int index" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "float16x8_t" + "value": "uint16x8x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 + "a": { + "register": "Vn.8H" }, - "r": { - "register": "Vd.8H" + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_lane_p16_x2", + "name": "vzipq_u32", "arguments": [ - "poly16x8x2_t vn", - "uint8x8_t vm", - "const int index" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "uint32x4x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vn.4S" }, - "r": { - "register": "Vd.8H" + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + 
"ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_p16_x2", + "name": "vzipq_u8", "arguments": [ - "poly16x8x2_t vn", - "uint8x16_t vm", - "const int index" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x16x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 + "a": { + "register": "Vn.16B" }, - "r": { - "register": "Vd.8H" + "b": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, From a370aa3251019ef319cf76bef1b7cc333cbd3ec3 Mon Sep 17 00:00:00 2001 From: David Wood Date: Fri, 16 Jan 2026 12:46:33 +0000 Subject: [PATCH 42/64] stdarch-verify: support sve Co-authored-by: Adam Gemmell Co-authored-by: Jamie Cunliffe Co-authored-by: Jacob Bramley Co-authored-by: Luca Vizzarro --- .../stdarch/crates/stdarch-verify/src/lib.rs | 67 +++++- .../crates/stdarch-verify/tests/arm.rs | 195 +++++++++++++++--- 2 files changed, 225 insertions(+), 37 deletions(-) diff --git a/library/stdarch/crates/stdarch-verify/src/lib.rs b/library/stdarch/crates/stdarch-verify/src/lib.rs index c81f5f45bcce4..f7304ab326858 100644 --- a/library/stdarch/crates/stdarch-verify/src/lib.rs +++ b/library/stdarch/crates/stdarch-verify/src/lib.rs @@ -120,6 +120,13 @@ fn functions(input: TokenStream, dirs: &[&str]) -> TokenStream { ); } + // Newer intrinsics don't have `rustc_legacy_const_generics` - assume they belong at + // the end of the argument list + if required_const.is_empty() && legacy_const_generics.is_empty() { + legacy_const_generics = + (arguments.len()..(arguments.len() + const_arguments.len())).collect(); + } + // The list of required consts, used to verify the arguments, comes from either the // `rustc_args_required_const` or the `rustc_legacy_const_generics` attribute. 
let required_const = if required_const.is_empty() { @@ -136,14 +143,14 @@ fn functions(input: TokenStream, dirs: &[&str]) -> TokenStream { arguments.insert(idx, ty); } - // strip leading underscore from fn name when building a test - // _mm_foo -> mm_foo such that the test name is test_mm_foo. - let test_name_string = format!("{name}"); - let mut test_name_id = test_name_string.as_str(); - while test_name_id.starts_with('_') { - test_name_id = &test_name_id[1..]; - } - let has_test = tests.contains(&format!("test_{test_name_id}")); + // Strip leading underscore from fn name when building a test + // `_mm_foo` -> `mm_foo` such that the test name is `test_mm_foo`. + let test_name = name.to_string(); + let test_name = test_name.trim_start_matches('_'); + let has_test = tests.contains(&format!("test_{test_name}")) + // SVE load/store tests start with `test` or `_with_` + || tests.iter().any(|t| t.starts_with(&format!("test_{test_name}")) + || t.ends_with(&format!("_with_{test_name}"))); let doc = find_doc(&f.attrs); @@ -347,6 +354,50 @@ fn to_type(t: &syn::Type) -> proc_macro2::TokenStream { "v4f32" => quote! { &v4f32 }, "v2f64" => quote! { &v2f64 }, + "svbool_t" => quote! { &SVBOOL }, + "svint8_t" => quote! { &SVI8 }, + "svint8x2_t" => quote! { &SVI8X2 }, + "svint8x3_t" => quote! { &SVI8X3 }, + "svint8x4_t" => quote! { &SVI8X4 }, + "svint16_t" => quote! { &SVI16 }, + "svint16x2_t" => quote! { &SVI16X2 }, + "svint16x3_t" => quote! { &SVI16X3 }, + "svint16x4_t" => quote! { &SVI16X4 }, + "svint32_t" => quote! { &SVI32 }, + "svint32x2_t" => quote! { &SVI32X2 }, + "svint32x3_t" => quote! { &SVI32X3 }, + "svint32x4_t" => quote! { &SVI32X4 }, + "svint64_t" => quote! { &SVI64 }, + "svint64x2_t" => quote! { &SVI64X2 }, + "svint64x3_t" => quote! { &SVI64X3 }, + "svint64x4_t" => quote! { &SVI64X4 }, + "svuint8_t" => quote! { &SVU8 }, + "svuint8x2_t" => quote! { &SVU8X2 }, + "svuint8x3_t" => quote! { &SVU8X3 }, + "svuint8x4_t" => quote! { &SVU8X4 }, + "svuint16_t" => quote! 
{ &SVU16 }, + "svuint16x2_t" => quote! { &SVU16X2 }, + "svuint16x3_t" => quote! { &SVU16X3 }, + "svuint16x4_t" => quote! { &SVU16X4 }, + "svuint32_t" => quote! { &SVU32 }, + "svuint32x2_t" => quote! { &SVU32X2 }, + "svuint32x3_t" => quote! { &SVU32X3 }, + "svuint32x4_t" => quote! { &SVU32X4 }, + "svuint64_t" => quote! { &SVU64 }, + "svuint64x2_t" => quote! { &SVU64X2 }, + "svuint64x3_t" => quote! { &SVU64X3 }, + "svuint64x4_t" => quote! { &SVU64X4 }, + "svfloat32_t" => quote! { &SVF32 }, + "svfloat32x2_t" => quote! { &SVF32X2 }, + "svfloat32x3_t" => quote! { &SVF32X3 }, + "svfloat32x4_t" => quote! { &SVF32X4 }, + "svfloat64_t" => quote! { &SVF64 }, + "svfloat64x2_t" => quote! { &SVF64X2 }, + "svfloat64x3_t" => quote! { &SVF64X3 }, + "svfloat64x4_t" => quote! { &SVF64X4 }, + "svprfop" => quote! { &SVPRFOP }, + "svpattern" => quote! { &SVPATTERN }, + // Generic types "T" => quote! { &GENERICT }, "U" => quote! { &GENERICU }, diff --git a/library/stdarch/crates/stdarch-verify/tests/arm.rs b/library/stdarch/crates/stdarch-verify/tests/arm.rs index c5744de3f644b..a37af2222a5de 100644 --- a/library/stdarch/crates/stdarch-verify/tests/arm.rs +++ b/library/stdarch/crates/stdarch-verify/tests/arm.rs @@ -16,6 +16,7 @@ struct Function { doc: &'static str, } +static BOOL: Type = Type::PrimBool; static F16: Type = Type::PrimFloat(16); static F32: Type = Type::PrimFloat(32); static F64: Type = Type::PrimFloat(64); @@ -28,6 +29,7 @@ static U32: Type = Type::PrimUnsigned(32); static U64: Type = Type::PrimUnsigned(64); static U8: Type = Type::PrimUnsigned(8); static NEVER: Type = Type::Never; +static VOID: Type = Type::Void; static GENERICT: Type = Type::GenericParam("T"); static GENERICU: Type = Type::GenericParam("U"); @@ -151,19 +153,78 @@ static U8X8X2: Type = Type::U(8, 8, 2); static U8X8X3: Type = Type::U(8, 8, 3); static U8X8X4: Type = Type::U(8, 8, 4); +static SVBOOL: Type = Type::Pred(1); +static SVBOOLX2: Type = Type::Pred(2); +static SVBOOLX3: Type = Type::Pred(3); 
+static SVBOOLX4: Type = Type::Pred(4); +static SVCOUNT: Type = Type::Pred(1); +static SVF16: Type = Type::SVF(16, 1); +static SVF16X2: Type = Type::SVF(16, 2); +static SVF16X3: Type = Type::SVF(16, 3); +static SVF16X4: Type = Type::SVF(16, 4); +static SVF32: Type = Type::SVF(32, 1); +static SVF32X2: Type = Type::SVF(32, 2); +static SVF32X3: Type = Type::SVF(32, 3); +static SVF32X4: Type = Type::SVF(32, 4); +static SVF64: Type = Type::SVF(64, 1); +static SVF64X2: Type = Type::SVF(64, 2); +static SVF64X3: Type = Type::SVF(64, 3); +static SVF64X4: Type = Type::SVF(64, 4); +static SVI8: Type = Type::SVI(8, 1); +static SVI8X2: Type = Type::SVI(8, 2); +static SVI8X3: Type = Type::SVI(8, 3); +static SVI8X4: Type = Type::SVI(8, 4); +static SVI16: Type = Type::SVI(16, 1); +static SVI16X2: Type = Type::SVI(16, 2); +static SVI16X3: Type = Type::SVI(16, 3); +static SVI16X4: Type = Type::SVI(16, 4); +static SVI32: Type = Type::SVI(32, 1); +static SVI32X2: Type = Type::SVI(32, 2); +static SVI32X3: Type = Type::SVI(32, 3); +static SVI32X4: Type = Type::SVI(32, 4); +static SVI64: Type = Type::SVI(64, 1); +static SVI64X2: Type = Type::SVI(64, 2); +static SVI64X3: Type = Type::SVI(64, 3); +static SVI64X4: Type = Type::SVI(64, 4); +static SVU8: Type = Type::SVU(8, 1); +static SVU8X2: Type = Type::SVU(8, 2); +static SVU8X3: Type = Type::SVU(8, 3); +static SVU8X4: Type = Type::SVU(8, 4); +static SVU16: Type = Type::SVU(16, 1); +static SVU16X2: Type = Type::SVU(16, 2); +static SVU16X3: Type = Type::SVU(16, 3); +static SVU16X4: Type = Type::SVU(16, 4); +static SVU32: Type = Type::SVU(32, 1); +static SVU32X2: Type = Type::SVU(32, 2); +static SVU32X3: Type = Type::SVU(32, 3); +static SVU32X4: Type = Type::SVU(32, 4); +static SVU64: Type = Type::SVU(64, 1); +static SVU64X2: Type = Type::SVU(64, 2); +static SVU64X3: Type = Type::SVU(64, 3); +static SVU64X4: Type = Type::SVU(64, 4); +static SVPRFOP: Type = Type::Enum("svprfop"); +static SVPATTERN: Type = Type::Enum("svpattern"); + 
#[derive(Debug, Copy, Clone, PartialEq)] enum Type { + Void, + PrimBool, PrimFloat(u8), PrimSigned(u8), PrimUnsigned(u8), PrimPoly(u8), MutPtr(&'static Type), ConstPtr(&'static Type), + Enum(&'static str), GenericParam(&'static str), I(u8, u8, u8), U(u8, u8, u8), P(u8, u8, u8), F(u8, u8, u8), + Pred(u8), + SVI(u8, u8), + SVU(u8, u8), + SVF(u8, u8), Never, } @@ -182,19 +243,18 @@ fn verify_all_signatures() { let mut all_valid = true; for rust in FUNCTIONS { + // Most SVE intrinsics just rely on the intrinsics test tool for validation if !rust.has_test { - if !SKIP_RUNTIME_TESTS.contains(&rust.name) { - println!( - "missing run-time test named `test_{}` for `{}`", - { - let mut id = rust.name; - while id.starts_with('_') { - id = &id[1..]; - } - id - }, - rust.name - ); + if !SKIP_RUNTIME_TESTS.contains(&rust.name) + // Most run-time tests are handled by the intrinsic-test tool, except for + // load/stores (which have generated tests) + && (!rust.name.starts_with("sv") || rust.name.starts_with("svld") + || rust.name.starts_with("svst")) + // The load/store test generator can't handle these cases yet + && (!rust.name.contains("_u32base_") || rust.name.contains("index") || rust.name.contains("offset")) + && !(rust.name.starts_with("svldff1") && rust.name.contains("gather")) + { + println!("missing run-time test for `{}`", rust.name); all_valid = false; } } @@ -269,12 +329,21 @@ fn matches(rust: &Function, arm: &Intrinsic) -> Result<(), String> { let mut nconst = 0; let iter = rust.arguments.iter().zip(&arm.arguments).enumerate(); for (i, (rust_ty, (arm, arm_const))) in iter { - if *rust_ty != arm { - bail!("mismatched arguments: {rust_ty:?} != {arm:?}") + match (*rust_ty, arm) { + // SVE uses generic type parameters to handle void pointers + (Type::ConstPtr(Type::GenericParam("T")), Type::ConstPtr(Type::Void)) => (), + // SVE const generics use i32 over u64 for usability reasons + (Type::PrimSigned(32), Type::PrimUnsigned(64)) if rust.required_const.contains(&i) => { + 
() + } + // svset doesn't have its const argument last as we assumed when building the Function + _ if rust.name.starts_with("svset") => (), + (x, y) if x == y => (), + _ => bail!("mismatched arguments: {rust_ty:?} != {arm:?}"), } if *arm_const { nconst += 1; - if !rust.required_const.contains(&i) { + if !rust.required_const.contains(&i) && !rust.name.starts_with("svset") { bail!("argument const mismatch"); } } @@ -283,7 +352,7 @@ fn matches(rust: &Function, arm: &Intrinsic) -> Result<(), String> { bail!("wrong number of const arguments"); } - if rust.instrs.is_empty() { + if rust.instrs.is_empty() && arm.instruction != "" { bail!( "instruction not listed for `{}`, but arm lists {:?}", rust.name, @@ -322,7 +391,7 @@ fn matches(rust: &Function, arm: &Intrinsic) -> Result<(), String> { Ok(()) } -#[derive(PartialEq)] +#[derive(Debug, PartialEq)] struct Intrinsic { name: String, ret: Option, @@ -337,7 +406,7 @@ struct JsonIntrinsic { arguments: Vec, return_type: ReturnType, #[serde(default)] - instructions: Vec>, + instructions: Option>>, } #[derive(Deserialize, Debug)] @@ -356,6 +425,8 @@ fn parse_intrinsics(intrinsics: Vec) -> HashMap Intrinsic { let name = intr.name; + // Remove '[' and ']' so that intrinsics of the form `svwhilerw[_s16]` becomes `svwhilerw_s16`. 
+ let name = name.replace('[', "").replace(']', ""); let ret = if intr.return_type.value == "void" { None } else { @@ -364,18 +435,24 @@ fn parse_intrinsic(mut intr: JsonIntrinsic) -> Intrinsic { // This ignores multiple instructions and different optional sequences for now to mimic // the old HTML scraping behaviour - let instruction = intr.instructions.swap_remove(0).swap_remove(0); + let instruction = intr + .instructions + .map_or(String::new(), |mut i| i.swap_remove(0).swap_remove(0)); let arguments = intr .arguments .iter() .map(|s| { - let (ty, konst) = match s.strip_prefix("const") { - Some(stripped) => (stripped.trim_start(), true), - None => (s.as_str(), false), + let ty = if let Some(i) = s.find('*') { + &s[..i + 1] + } else { + s.rsplit_once(' ').unwrap().0.trim_start_matches("const ") }; - let ty = ty.rsplit_once(' ').unwrap().0; - (parse_ty(ty), konst) + let ty = parse_ty(ty); + let konst = s.contains("const") && !matches!(ty, Type::ConstPtr(_)) + || s.starts_with("enum") + || s.rsplit_once(" ").unwrap().1.starts_with("imm"); + (ty, konst) }) .collect::>(); @@ -388,18 +465,27 @@ fn parse_intrinsic(mut intr: JsonIntrinsic) -> Intrinsic { } fn parse_ty(s: &str) -> Type { - let suffix = " const *"; - if let Some(base) = s.strip_suffix(suffix) { - Type::ConstPtr(parse_ty_base(base)) - } else if let Some(base) = s.strip_suffix(" *") { - Type::MutPtr(parse_ty_base(base)) + if let Some(ty) = s.strip_suffix("*") { + let ty = ty.trim(); + if let Some(ty) = ty.strip_prefix("const") { + // SVE intrinsics are west-const (`const int8_t *`) + Type::ConstPtr(parse_ty_base(ty)) + } else if let Some(ty) = ty.strip_suffix("const") { + // Neon intrinsics are east-const (`int8_t const *`) + Type::ConstPtr(parse_ty_base(ty)) + } else { + Type::MutPtr(parse_ty_base(ty)) + } } else { *parse_ty_base(s) } } fn parse_ty_base(s: &str) -> &'static Type { + let s = s.trim(); match s { + "bool" => &BOOL, + "void" => &VOID, "float16_t" => &F16, "float16x4_t" => &F16X4, 
"float16x4x2_t" => &F16X4X2, @@ -529,6 +615,57 @@ fn parse_ty_base(s: &str) -> &'static Type { "uint8x8x2_t" => &U8X8X2, "uint8x8x3_t" => &U8X8X3, "uint8x8x4_t" => &U8X8X4, + "svbool_t" => &SVBOOL, + "svboolx2_t" => &SVBOOLX2, + "svboolx3_t" => &SVBOOLX3, + "svboolx4_t" => &SVBOOLX4, + "svcount_t" => &SVCOUNT, + "svfloat16_t" => &SVF16, + "svfloat16x2_t" => &SVF16X2, + "svfloat16x3_t" => &SVF16X3, + "svfloat16x4_t" => &SVF16X4, + "svfloat32_t" => &SVF32, + "svfloat32x2_t" => &SVF32X2, + "svfloat32x3_t" => &SVF32X3, + "svfloat32x4_t" => &SVF32X4, + "svfloat64_t" => &SVF64, + "svfloat64x2_t" => &SVF64X2, + "svfloat64x3_t" => &SVF64X3, + "svfloat64x4_t" => &SVF64X4, + "svint8_t" => &SVI8, + "svint8x2_t" => &SVI8X2, + "svint8x3_t" => &SVI8X3, + "svint8x4_t" => &SVI8X4, + "svint16_t" => &SVI16, + "svint16x2_t" => &SVI16X2, + "svint16x3_t" => &SVI16X3, + "svint16x4_t" => &SVI16X4, + "svint32_t" => &SVI32, + "svint32x2_t" => &SVI32X2, + "svint32x3_t" => &SVI32X3, + "svint32x4_t" => &SVI32X4, + "svint64_t" => &SVI64, + "svint64x2_t" => &SVI64X2, + "svint64x3_t" => &SVI64X3, + "svint64x4_t" => &SVI64X4, + "svuint8_t" => &SVU8, + "svuint8x2_t" => &SVU8X2, + "svuint8x3_t" => &SVU8X3, + "svuint8x4_t" => &SVU8X4, + "svuint16_t" => &SVU16, + "svuint16x2_t" => &SVU16X2, + "svuint16x3_t" => &SVU16X3, + "svuint16x4_t" => &SVU16X4, + "svuint32_t" => &SVU32, + "svuint32x2_t" => &SVU32X2, + "svuint32x3_t" => &SVU32X3, + "svuint32x4_t" => &SVU32X4, + "svuint64_t" => &SVU64, + "svuint64x2_t" => &SVU64X2, + "svuint64x3_t" => &SVU64X3, + "svuint64x4_t" => &SVU64X4, + "enum svprfop" => &SVPRFOP, + "enum svpattern" => &SVPATTERN, _ => panic!("failed to parse json type {s:?}"), } From b6b2ce3d44220c77223a25c20cf99cb98732ab72 Mon Sep 17 00:00:00 2001 From: David Wood Date: Wed, 4 Mar 2026 14:16:40 +0000 Subject: [PATCH 43/64] core_arch: no SVE on arm64ec arm64ec doesn't support SVE. 
--- library/stdarch/crates/core_arch/src/aarch64/mod.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/library/stdarch/crates/core_arch/src/aarch64/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/mod.rs index 9376e04b3b53a..0292be2e0d778 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/mod.rs @@ -25,11 +25,17 @@ mod neon; #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub use self::neon::*; +// The rest of `core_arch::aarch64` is available on `arm64ec` but SVE is not supported on `arm64ec`. +#[cfg(any(target_arch = "aarch64", doc))] mod sve; +#[cfg(any(target_arch = "aarch64", doc))] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub use self::sve::*; +// The rest of `core_arch::aarch64` is available on `arm64ec` but SVE is not supported on `arm64ec`. +#[cfg(any(target_arch = "aarch64", doc))] mod sve2; +#[cfg(any(target_arch = "aarch64", doc))] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub use self::sve2::*; From c21d4e99eabe94f3270c691979d35a9e0dd9b4ae Mon Sep 17 00:00:00 2001 From: David Wood Date: Wed, 4 Mar 2026 14:16:40 +0000 Subject: [PATCH 44/64] intrinsic-test: update parsing for SVE intrinsics With SVE intrinsics in the `arm_intrinsics.json`, the parsing needs to be updated to know to expect any new fields. 
--- library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs b/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs index 65c179ef0d083..c1563a7364ce7 100644 --- a/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs +++ b/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs @@ -12,6 +12,8 @@ use std::path::Path; #[serde(deny_unknown_fields)] struct ReturnType { value: String, + #[serde(rename = "element_bit_size")] + _element_bit_size: Option, } #[derive(Deserialize, Debug)] @@ -50,6 +52,8 @@ struct JsonIntrinsic { args_prep: Option>, #[serde(rename = "Architectures")] architectures: Vec, + #[serde(rename = "instructions")] + _instructions: Option>>, } pub fn get_neon_intrinsics( From 88b49085833e4a0ee42b4b606cdbda48434e38ca Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 9 Apr 2026 09:14:28 +0000 Subject: [PATCH 45/64] assert-instr: support type generics SVE intrinsics have both type and const generics and so the `assert_instr` macro needs to be able to generate test cases with the type generics instantiated with the types provided in the attribute. 
Co-authored-by: Jamie Cunliffe Co-authored-by: Luca Vizzarro Co-authored-by: Adam Gemmell Co-authored-by: Jacob Bramley --- .../crates/assert-instr-macro/src/lib.rs | 79 +++++++++++++++---- 1 file changed, 65 insertions(+), 14 deletions(-) diff --git a/library/stdarch/crates/assert-instr-macro/src/lib.rs b/library/stdarch/crates/assert-instr-macro/src/lib.rs index 13c3c3851b43c..839aae67cb2b0 100644 --- a/library/stdarch/crates/assert-instr-macro/src/lib.rs +++ b/library/stdarch/crates/assert-instr-macro/src/lib.rs @@ -14,6 +14,7 @@ extern crate quote; use proc_macro2::TokenStream; use quote::ToTokens; +use syn::spanned::Spanned; #[proc_macro_attribute] pub fn assert_instr( @@ -67,21 +68,21 @@ pub fn assert_instr( ); let mut inputs = Vec::new(); let mut input_vals = Vec::new(); - let mut const_vals = Vec::new(); + let mut param_vals = Vec::new(); let ret = &func.sig.output; for arg in func.sig.inputs.iter() { let capture = match *arg { - syn::FnArg::Typed(ref c) => c, + syn::FnArg::Typed(ref c) => c.to_owned(), ref v => panic!( "arguments must not have patterns: `{:?}`", v.clone().into_token_stream() ), }; - let ident = match *capture.pat { - syn::Pat::Ident(ref i) => &i.ident, + let ident = match capture.pat.as_ref() { + syn::Pat::Ident(i) => &i.ident.to_owned(), _ => panic!("must have bare arguments"), }; - if let Some((_, tokens)) = invoc.args.iter().find(|a| *ident == a.0) { + if let Some(&(_, ref tokens)) = invoc.args.iter().find(|a| *ident == a.0) { input_vals.push(quote! { #tokens }); } else { inputs.push(capture); @@ -89,18 +90,48 @@ pub fn assert_instr( } } for arg in func.sig.generics.params.iter() { - let c = match *arg { - syn::GenericParam::Const(ref c) => c, + match *arg { + syn::GenericParam::Const(ref c) => { + if let Some((_, tokens)) = invoc.args.iter().find(|a| c.ident == a.0) { + param_vals.push(quote! 
{ #tokens }); + } else { + panic!("const generics must have a value for tests"); + } + } + syn::GenericParam::Type(ref t) => { + if let Some((_, tokens)) = invoc.args.iter().find(|a| t.ident == a.0) + && let syn::Expr::Path(syn::ExprPath { qself, path, .. }) = tokens + { + param_vals.push(syn::Token![_](tokens.span()).to_token_stream()); + + let generic_ty_value = syn::TypePath { + qself: qself.clone(), + path: path.clone(), + }; + + // Replace any function arguments that use generic parameters with the + // instantiation provided in the macro invocation. + inputs.iter_mut().for_each(|arg| { + update_type_path(arg.ty.as_mut(), |type_path: &mut syn::TypePath| { + if let Some(syn::PathSegment { + ident: last_ident, .. + }) = type_path.path.segments.last_mut() + { + if *last_ident == t.ident { + *type_path = generic_ty_value.to_owned() + } + } + }) + }); + } else { + panic!("type generics must have a type for tests"); + } + } ref v => panic!( - "only const generics are allowed: `{:?}`", + "only type and const generics are allowed: `{:?}`", v.clone().into_token_stream() ), }; - if let Some((_, tokens)) = invoc.args.iter().find(|a| c.ident == a.0) { - const_vals.push(quote! { #tokens }); - } else { - panic!("const generics must have a value for tests"); - } } let attrs = func @@ -138,7 +169,7 @@ pub fn assert_instr( #[unsafe(no_mangle)] #[inline(never)] pub unsafe extern #abi fn #shim_name(#(#inputs),*) #ret { - #name::<#(#const_vals),*>(#(#input_vals),*) + #name::<#(#param_vals),*>(#(#input_vals),*) } }; @@ -222,3 +253,23 @@ where } } } + +/// Calls `update` on type paths so that type generics can be replaced with the instantiation from +/// the attribute. +fn update_type_path(ty: &mut syn::Type, update: F) +where + F: Fn(&mut syn::TypePath), +{ + use syn::Type::*; + match ty { + Array(syn::TypeArray { elem, .. }) + | Group(syn::TypeGroup { elem, .. }) + | Paren(syn::TypeParen { elem, .. }) + | Ptr(syn::TypePtr { elem, .. }) + | Reference(syn::TypeReference { elem, .. 
}) + | Slice(syn::TypeSlice { elem, .. }) => update_type_path(elem.as_mut(), update), + Path(path @ syn::TypePath { .. }) => update(path), + Tuple(..) => panic!("tuples and generic types together are not yet supported"), + _ => {} + } +} From acb48ca2cac50ca659abaa1b041ad219215bbd7c Mon Sep 17 00:00:00 2001 From: David Wood Date: Mon, 13 Apr 2026 04:42:58 +0000 Subject: [PATCH 46/64] gen-arm: disable `assert_instr` for `pfalse` The implementation for this has the same behaviour as a `pfalse` but doesn't currently emit one until an intrinsic is added to emit a `zeroinitializer` for this. --- .../crates/core_arch/src/aarch64/sve/generated.rs | 1 - .../crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml | 4 +++- library/stdarch/crates/stdarch-verify/tests/arm.rs | 10 +++++++++- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs index 6edfc8e159a75..ed28e98a813ea 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs @@ -30819,7 +30819,6 @@ pub fn svorv_u64(pg: svbool_t, op: svuint64_t) -> u64 { #[inline(always)] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(pfalse))] pub fn svpfalse_b() -> svbool_t { svdupq_n_b8( false, false, false, false, false, false, false, false, false, false, false, false, false, diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml index 1fad8bb371f90..383e50b7cc70c 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml @@ -3611,7 +3611,9 @@ intrinsics: doc: Set all predicate elements to false arguments: [] return_type: "svbool_t" - assert_instr: [pfalse] + # 
TODO: With current implementation, `pfalse` isn't generated, will need to add intrinsic to + # generate `zeroinitializer` + # assert_instr: [pfalse] compose: - FnCall: - "svdupq_n_b8" diff --git a/library/stdarch/crates/stdarch-verify/tests/arm.rs b/library/stdarch/crates/stdarch-verify/tests/arm.rs index a37af2222a5de..2242bf4264e57 100644 --- a/library/stdarch/crates/stdarch-verify/tests/arm.rs +++ b/library/stdarch/crates/stdarch-verify/tests/arm.rs @@ -352,7 +352,10 @@ fn matches(rust: &Function, arm: &Intrinsic) -> Result<(), String> { bail!("wrong number of const arguments"); } - if rust.instrs.is_empty() && arm.instruction != "" { + if rust.instrs.is_empty() + && arm.instruction != "" + && !SKIP_ASSERT_INSTR_TESTS.contains(&rust.name) + { bail!( "instruction not listed for `{}`, but arm lists {:?}", rust.name, @@ -671,6 +674,11 @@ fn parse_ty_base(s: &str) -> &'static Type { } } +// FIXME(arm-maintainers): Some tests require new rustc intrinsics in order to generate +// the appropriate instruction, though they do have the correct behaviour - these will be fixed +// but are disabled for now. +static SKIP_ASSERT_INSTR_TESTS: &'static [&'static str] = &["svpfalse_b"]; + // FIXME(arm-maintainers): With the advent of the `intrinsic-test` tool, new tests of this kind // are no longer being added and just adding to this list indefinitely isn't the best solution for // dealing with that. From e6c0129553cf1e8605b6ece59984c7c29a4380d2 Mon Sep 17 00:00:00 2001 From: David Wood Date: Tue, 14 Apr 2026 00:03:19 +0000 Subject: [PATCH 47/64] stdarch-test: `[us]shll[tb]` have no aliases SVE's `[us]shll[tb]` instructions have no aliases unlike Neon's `[us]hll{2}` so this logic needs adjusting to not accidentally rewrite the instruction. 
--- .../crates/stdarch-test/src/disassembly.rs | 24 +++++++++++++------ 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/library/stdarch/crates/stdarch-test/src/disassembly.rs b/library/stdarch/crates/stdarch-test/src/disassembly.rs index 237e8d2dc28a0..7cf657baa0d71 100644 --- a/library/stdarch/crates/stdarch-test/src/disassembly.rs +++ b/library/stdarch/crates/stdarch-test/src/disassembly.rs @@ -158,16 +158,26 @@ fn parse(output: &str) -> HashSet { }; if cfg!(any(target_arch = "aarch64", target_arch = "arm64ec")) { - // Normalize [us]shll.* ..., #0 instructions to the preferred form: [us]xtl.* ... - // as neither LLVM objdump nor dumpbin does that. - // See https://developer.arm.com/documentation/ddi0602/latest/SIMD-FP-Instructions/UXTL--UXTL2--Unsigned-extend-Long--an-alias-of-USHLL--USHLL2- - // and https://developer.arm.com/documentation/ddi0602/latest/SIMD-FP-Instructions/SXTL--SXTL2--Signed-extend-Long--an-alias-of-SSHLL--SSHLL2- - // for details. + // Normalize `[us]shll{2}.* ..., #0` instructions to the preferred + // form: `[us]xtl{2}.* ...` as neither LLVM objdump nor dumpbin does that. + // + // SVE has `[us]shll[tb]` instructions that don't have an equivalent alias. + // + // See Arm documentation for details: + // + // - https://developer.arm.com/documentation/ddi0602/2026-03/SIMD-FP-Instructions/UXTL--UXTL2--Unsigned-extend-long--an-alias-of-USHLL--USHLL2-?lang=en + // - https://developer.arm.com/documentation/ddi0602/2026-03/SIMD-FP-Instructions/SXTL--SXTL2--Signed-extend-long--an-alias-of-SSHLL--SSHLL2-?lang=en fn is_shll(instr: &str) -> bool { if cfg!(target_env = "msvc") { - instr.starts_with("ushll") || instr.starts_with("sshll") + instr == "ushll" + || instr == "ushll2" + || instr == "sshll" + || instr == "sshll2" } else { - instr.starts_with("ushll.") || instr.starts_with("sshll.") + instr == "ushll." + || instr == "ushll2." + || instr == "sshll." + || instr == "sshll2." 
} } match (parts.first(), parts.last()) { From b1818677386905dbc834f9aa1449daf2d669c718 Mon Sep 17 00:00:00 2001 From: David Wood Date: Tue, 14 Apr 2026 01:09:25 +0000 Subject: [PATCH 48/64] gen-arm: `assert_instr` on msvc for `[su]mull[tb]` `dumpbin.exe` produces `44a1c000`/`44e1c000`/`44a1c400`/`44e1c400` for `[su]mull[tb]` instead of the instruction name - so skip `assert_instr` for these intrinsics on MSVC targets. --- .../core_arch/src/aarch64/sve2/generated.rs | 64 +++++++++++++------ .../spec/sve2/aarch64.spec.yml | 22 +++++-- 2 files changed, 60 insertions(+), 26 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs index 79be8a88890c7..c5b0149c9c302 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs @@ -10281,7 +10281,10 @@ pub fn svmul_lane_u64(op1: svuint64_t, op2: svuint64_t) -> #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullb, IMM_INDEX = 0))] +#[cfg_attr( + all(test, not(target_env = "msvc")), + assert_instr(smullb, IMM_INDEX = 0) +)] pub fn svmullb_lane_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { static_assert_range!(IMM_INDEX, 0..=7); unsafe extern "unadjusted" { @@ -10298,7 +10301,10 @@ pub fn svmullb_lane_s32(op1: svint16_t, op2: svint16_t) -> #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullb, IMM_INDEX = 0))] +#[cfg_attr( + all(test, not(target_env = "msvc")), + assert_instr(smullb, IMM_INDEX = 0) +)] pub fn svmullb_lane_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { static_assert_range!(IMM_INDEX, 0..=3); unsafe extern "unadjusted" { @@ -10315,7 +10321,10 @@ pub fn svmullb_lane_s64(op1: svint32_t, op2: svint32_t) -> 
#[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullb, IMM_INDEX = 0))] +#[cfg_attr( + all(test, not(target_env = "msvc")), + assert_instr(umullb, IMM_INDEX = 0) +)] pub fn svmullb_lane_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { static_assert_range!(IMM_INDEX, 0..=7); unsafe extern "unadjusted" { @@ -10332,7 +10341,10 @@ pub fn svmullb_lane_u32(op1: svuint16_t, op2: svuint16_t) #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullb, IMM_INDEX = 0))] +#[cfg_attr( + all(test, not(target_env = "msvc")), + assert_instr(umullb, IMM_INDEX = 0) +)] pub fn svmullb_lane_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { static_assert_range!(IMM_INDEX, 0..=3); unsafe extern "unadjusted" { @@ -10481,7 +10493,10 @@ pub fn svmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullt, IMM_INDEX = 0))] +#[cfg_attr( + all(test, not(target_env = "msvc")), + assert_instr(smullt, IMM_INDEX = 0) +)] pub fn svmullt_lane_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { static_assert_range!(IMM_INDEX, 0..=7); unsafe extern "unadjusted" { @@ -10498,7 +10513,10 @@ pub fn svmullt_lane_s32(op1: svint16_t, op2: svint16_t) -> #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullt, IMM_INDEX = 0))] +#[cfg_attr( + all(test, not(target_env = "msvc")), + assert_instr(smullt, IMM_INDEX = 0) +)] pub fn svmullt_lane_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { static_assert_range!(IMM_INDEX, 0..=3); unsafe extern "unadjusted" { @@ -10515,7 +10533,10 @@ pub fn svmullt_lane_s64(op1: svint32_t, op2: svint32_t) -> 
#[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullt, IMM_INDEX = 0))] +#[cfg_attr( + all(test, not(target_env = "msvc")), + assert_instr(umullt, IMM_INDEX = 0) +)] pub fn svmullt_lane_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { static_assert_range!(IMM_INDEX, 0..=7); unsafe extern "unadjusted" { @@ -10532,7 +10553,10 @@ pub fn svmullt_lane_u32(op1: svuint16_t, op2: svuint16_t) #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullt, IMM_INDEX = 0))] +#[cfg_attr( + all(test, not(target_env = "msvc")), + assert_instr(umullt, IMM_INDEX = 0) +)] pub fn svmullt_lane_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { static_assert_range!(IMM_INDEX, 0..=3); unsafe extern "unadjusted" { @@ -10549,7 +10573,7 @@ pub fn svmullt_lane_u64(op1: svuint32_t, op2: svuint32_t) #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] pub fn svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv8i16")] @@ -10562,7 +10586,7 @@ pub fn svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] pub fn svmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { svmullt_s16(op1, svdup_n_s8(op2)) } @@ -10571,7 +10595,7 @@ pub fn svmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] pub fn svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv4i32")] @@ -10584,7 +10608,7 @@ pub fn svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] pub fn svmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { svmullt_s32(op1, svdup_n_s16(op2)) } @@ -10593,7 +10617,7 @@ pub fn svmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] pub fn svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv2i64")] @@ -10606,7 +10630,7 @@ pub fn svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] pub fn svmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { svmullt_s64(op1, svdup_n_s32(op2)) } @@ -10615,7 +10639,7 @@ pub fn svmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] pub 
fn svmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv8i16")] @@ -10628,7 +10652,7 @@ pub fn svmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] pub fn svmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { svmullt_u16(op1, svdup_n_u8(op2)) } @@ -10637,7 +10661,7 @@ pub fn svmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] pub fn svmullt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv4i32")] @@ -10650,7 +10674,7 @@ pub fn svmullt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] pub fn svmullt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { svmullt_u32(op1, svdup_n_u16(op2)) } @@ -10659,7 +10683,7 @@ pub fn svmullt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] pub fn svmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.umullt.nxv2i64")] @@ -10672,7 +10696,7 @@ pub fn svmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] pub fn svmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { svmullt_u64(op1, svdup_n_u32(op2)) } diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml index 6365bea21b511..269d7ff0eacbb 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml @@ -10,6 +10,10 @@ generate_load_store_tests: true sve-unstable: &sve-unstable FnCall: [unstable, ['feature = "stdarch_aarch64_sve"', 'issue= "145052"']] +# `#[cfg_attr(all(test, not(target_env = "msvc"))]` +msvc-disabled: &msvc-disabled + FnCall: [all, [test, {FnCall: [not, ['target_env = "msvc"']]}]] + intrinsics: - name: svbext[{_n}_{type}] attr: [*sve-unstable] @@ -2429,7 +2433,10 @@ intrinsics: - LLVMLink: { name: "{type_kind[0].su}mullb.{sve_type[0]}" } - name: svmullb_lane[_{type[0]}] - attr: [*sve-unstable] + attr: + - *sve-unstable + # FIXME(arm-maintainers): MSVC disassembly of `[su]mullb` fails + - FnCall: [cfg_attr, [*msvc-disabled, {FnCall: [assert_instr, ["{type_kind[0].su}mullb", "IMM_INDEX = 0"]]}]] doc: Multiply long (bottom) arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] return_type: "{sve_type[0]}" @@ -2440,7 +2447,6 @@ intrinsics: - [u64, u32] static_defs: ["const IMM_INDEX: i32"] constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] - assert_instr: [["{type_kind[0].su}mullb", "IMM_INDEX = 0"]] compose: - LLVMLink: name: "{type_kind[0].su}mullb.lane.{sve_type[0]}" @@ -2449,7 +2455,10 @@ intrinsics: - FnCall: ["{llvm_link}", [$op1, $op2, 
$IMM_INDEX]] - name: svmullt[{_n}_{type[0]}] - attr: [*sve-unstable] + attr: + - *sve-unstable + # FIXME(arm-maintainers): MSVC disassembly of `[su]mullt` fails + - FnCall: [cfg_attr, [*msvc-disabled, {FnCall: [assert_instr, ["{type_kind[0].su}mullt"]]}]] doc: Multiply long (top) arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] return_type: "{sve_type[0]}" @@ -2460,13 +2469,15 @@ intrinsics: - [u16, u8] - [u32, u16] - [u64, u32] - assert_instr: ["{type_kind[0].su}mullt"] n_variant_op: op2 compose: - LLVMLink: { name: "{type_kind[0].su}mullt.{sve_type[0]}" } - name: svmullt_lane[_{type[0]}] - attr: [*sve-unstable] + attr: + - *sve-unstable + # FIXME(arm-maintainers): MSVC disassembly of `[su]mullt` fails + - FnCall: [cfg_attr, [*msvc-disabled, {FnCall: [assert_instr, ["{type_kind[0].su}mullt", "IMM_INDEX = 0"]]}]] doc: Multiply long (top) arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] return_type: "{sve_type[0]}" @@ -2477,7 +2488,6 @@ intrinsics: - [u64, u32] static_defs: ["const IMM_INDEX: i32"] constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] - assert_instr: [["{type_kind[0].su}mullt", "IMM_INDEX = 0"]] compose: - LLVMLink: name: "{type_kind[0].su}mullt.lane.{sve_type[0]}" From b0d91aa86c5344499bf5cbec62ff650521daf880 Mon Sep 17 00:00:00 2001 From: David Wood Date: Tue, 14 Apr 2026 04:05:07 +0000 Subject: [PATCH 49/64] core_arch: disable ld/st tests on msvc There seemed to be non-deterministic failures on MSVC that looked like corruption of the FFR in CI. Until this can be investigated, to avoid any spurious failures, these tests are disabled. 
--- library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs index a3f70ab61c40f..04a92359a0228 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs @@ -374,6 +374,9 @@ pub enum svprfop { SV_PSTL3STRM = 13, } -#[cfg(test)] +// FIXME(arm-maintainers): On MSVC targets, it seemed like spurious corruption of the FFR was being +// observed non-deterministically on CI. Disabling these tests out of caution on that platform until +// it is investigated. +#[cfg(all(test, not(target_env = "msvc")))] #[path = "ld_st_tests_aarch64.rs"] mod ld_st_tests; From 69aad1af38330013324fdd82ff86866a369e03bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eduardo=20S=C3=A1nchez=20Mu=C3=B1oz?= Date: Sat, 11 Apr 2026 17:22:53 +0200 Subject: [PATCH 50/64] Fix MIPS tests Tests failed to build due to use of legacy const generics syntax, out-of-bounds immediates and incorrect name of const generics in `assert_instr` --- .../mips64-unknown-linux-gnuabi64/Dockerfile | 2 +- .../Dockerfile | 2 +- library/stdarch/crates/core_arch/src/lib.rs | 6 +- .../stdarch/crates/core_arch/src/mips/msa.rs | 550 +++++++++--------- .../stdarch/crates/simd-test-macro/src/lib.rs | 1 + 5 files changed, 286 insertions(+), 275 deletions(-) diff --git a/library/stdarch/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile b/library/stdarch/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile index a8b352881e813..8bcd6409453c4 100644 --- a/library/stdarch/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile +++ b/library/stdarch/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.10 +FROM ubuntu:25.04 # gcc-mips64-linux-gnuabi64 not available in 25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc libc6-dev qemu-user 
ca-certificates \ diff --git a/library/stdarch/ci/docker/mips64el-unknown-linux-gnuabi64/Dockerfile b/library/stdarch/ci/docker/mips64el-unknown-linux-gnuabi64/Dockerfile index 147a3df614554..9aa0ce05783c5 100644 --- a/library/stdarch/ci/docker/mips64el-unknown-linux-gnuabi64/Dockerfile +++ b/library/stdarch/ci/docker/mips64el-unknown-linux-gnuabi64/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.10 +FROM ubuntu:25.04 # gcc-mips64el-linux-gnuabi64 not available in 25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc libc6-dev qemu-user ca-certificates \ diff --git a/library/stdarch/crates/core_arch/src/lib.rs b/library/stdarch/crates/core_arch/src/lib.rs index 9255994e5ee81..d0a4ac9260781 100644 --- a/library/stdarch/crates/core_arch/src/lib.rs +++ b/library/stdarch/crates/core_arch/src/lib.rs @@ -70,7 +70,11 @@ )] #![cfg_attr( test, - feature(stdarch_arm_feature_detection, stdarch_powerpc_feature_detection,) + feature( + stdarch_arm_feature_detection, + stdarch_mips_feature_detection, + stdarch_powerpc_feature_detection, + ) )] #[cfg(test)] diff --git a/library/stdarch/crates/core_arch/src/mips/msa.rs b/library/stdarch/crates/core_arch/src/mips/msa.rs index 6246433da5585..bc601baef9e2a 100644 --- a/library/stdarch/crates/core_arch/src/mips/msa.rs +++ b/library/stdarch/crates/core_arch/src/mips/msa.rs @@ -1407,7 +1407,7 @@ pub unsafe fn __msa_addv_d(a: v2i64, b: v2i64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(addvi.b, imm5 = 0b10111))] +#[cfg_attr(test, assert_instr(addvi.b, IMM5 = 0b10111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_addvi_b(a: v16i8) -> v16i8 { @@ -1423,7 +1423,7 @@ pub unsafe fn __msa_addvi_b(a: v16i8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(addvi.h, imm5 = 0b10111))] +#[cfg_attr(test, assert_instr(addvi.h, IMM5 = 0b10111))] #[rustc_legacy_const_generics(1)] 
#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_addvi_h(a: v8i16) -> v8i16 { @@ -1439,7 +1439,7 @@ pub unsafe fn __msa_addvi_h(a: v8i16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(addvi.w, imm5 = 0b10111))] +#[cfg_attr(test, assert_instr(addvi.w, IMM5 = 0b10111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_addvi_w(a: v4i32) -> v4i32 { @@ -1455,7 +1455,7 @@ pub unsafe fn __msa_addvi_w(a: v4i32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(addvi.d, imm5 = 0b10111))] +#[cfg_attr(test, assert_instr(addvi.d, IMM5 = 0b10111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_addvi_d(a: v2i64) -> v2i64 { @@ -1486,7 +1486,7 @@ pub unsafe fn __msa_and_v(a: v16u8, b: v16u8) -> v16u8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(andi.b, imm8 = 0b10010111))] +#[cfg_attr(test, assert_instr(andi.b, IMM8 = 0b10010111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_andi_b(a: v16u8) -> v16u8 { @@ -1938,7 +1938,7 @@ pub unsafe fn __msa_bclr_d(a: v2u64, b: v2u64) -> v2u64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(bclri.b, imm3 = 0b111))] +#[cfg_attr(test, assert_instr(bclri.b, IMM3 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bclri_b(a: v16u8) -> v16u8 { @@ -1954,7 +1954,7 @@ pub unsafe fn __msa_bclri_b(a: v16u8) -> v16u8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(bclri.h, imm4 = 0b1111))] +#[cfg_attr(test, assert_instr(bclri.h, IMM4 = 0b1111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bclri_h(a: v8u16) -> v8u16 { @@ -1970,7 
+1970,7 @@ pub unsafe fn __msa_bclri_h(a: v8u16) -> v8u16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(bclri.w, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(bclri.w, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bclri_w(a: v4u32) -> v4u32 { @@ -1986,7 +1986,7 @@ pub unsafe fn __msa_bclri_w(a: v4u32) -> v4u32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(bclri.d, imm6 = 0b111111))] +#[cfg_attr(test, assert_instr(bclri.d, IMM6 = 0b111111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bclri_d(a: v2u64) -> v2u64 { @@ -2062,7 +2062,7 @@ pub unsafe fn __msa_binsl_d(a: v2u64, b: v2u64, c: v2u64) -> v2u64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(binsli.b, imm3 = 0b111))] +#[cfg_attr(test, assert_instr(binsli.b, IMM3 = 0b111))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsli_b(a: v16u8, b: v16u8) -> v16u8 { @@ -2078,7 +2078,7 @@ pub unsafe fn __msa_binsli_b(a: v16u8, b: v16u8) -> v16u8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(binsli.h, imm4 = 0b1111))] +#[cfg_attr(test, assert_instr(binsli.h, IMM4 = 0b1111))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsli_h(a: v8u16, b: v8u16) -> v8u16 { @@ -2094,7 +2094,7 @@ pub unsafe fn __msa_binsli_h(a: v8u16, b: v8u16) -> v8u16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(binsli.w, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(binsli.w, IMM5 = 0b11111))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsli_w(a: v4u32, b: v4u32) -> v4u32 { @@ -2110,7 +2110,7 @@ pub unsafe fn __msa_binsli_w(a: v4u32, b: 
v4u32) -> v4u32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(binsli.d, imm6 = 0b111111))] +#[cfg_attr(test, assert_instr(binsli.d, IMM6 = 0b111111))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsli_d(a: v2u64, b: v2u64) -> v2u64 { @@ -2186,7 +2186,7 @@ pub unsafe fn __msa_binsr_d(a: v2u64, b: v2u64, c: v2u64) -> v2u64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(binsri.b, imm3 = 0b111))] +#[cfg_attr(test, assert_instr(binsri.b, IMM3 = 0b111))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsri_b(a: v16u8, b: v16u8) -> v16u8 { @@ -2202,7 +2202,7 @@ pub unsafe fn __msa_binsri_b(a: v16u8, b: v16u8) -> v16u8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(binsri.h, imm4 = 0b1111))] +#[cfg_attr(test, assert_instr(binsri.h, IMM4 = 0b1111))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsri_h(a: v8u16, b: v8u16) -> v8u16 { @@ -2218,7 +2218,7 @@ pub unsafe fn __msa_binsri_h(a: v8u16, b: v8u16) -> v8u16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(binsri.w, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(binsri.w, IMM5 = 0b11111))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsri_w(a: v4u32, b: v4u32) -> v4u32 { @@ -2234,7 +2234,7 @@ pub unsafe fn __msa_binsri_w(a: v4u32, b: v4u32) -> v4u32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(binsri.d, imm6 = 0b111111))] +#[cfg_attr(test, assert_instr(binsri.d, IMM6 = 0b111111))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsri_d(a: v2u64, b: v2u64) -> v2u64 { @@ -2265,7 +2265,7 @@ pub unsafe fn __msa_bmnz_v(a: v16u8, b: 
v16u8, c: v16u8) -> v16u8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(bmnzi.b, imm8 = 0b11111111))] +#[cfg_attr(test, assert_instr(bmnzi.b, IMM8 = 0b11111111))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bmnzi_b(a: v16u8, b: v16u8) -> v16u8 { @@ -2296,7 +2296,7 @@ pub unsafe fn __msa_bmz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(bmzi.b, imm8 = 0b11111111))] +#[cfg_attr(test, assert_instr(bmzi.b, IMM8 = 0b11111111))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bmzi_b(a: v16u8, b: v16u8) -> v16u8 { @@ -2372,7 +2372,7 @@ pub unsafe fn __msa_bneg_d(a: v2u64, b: v2u64) -> v2u64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(bnegi.b, imm3 = 0b111))] +#[cfg_attr(test, assert_instr(bnegi.b, IMM3 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bnegi_b(a: v16u8) -> v16u8 { @@ -2388,7 +2388,7 @@ pub unsafe fn __msa_bnegi_b(a: v16u8) -> v16u8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(bnegi.h, imm4 = 0b1111))] +#[cfg_attr(test, assert_instr(bnegi.h, IMM4 = 0b1111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bnegi_h(a: v8u16) -> v8u16 { @@ -2404,7 +2404,7 @@ pub unsafe fn __msa_bnegi_h(a: v8u16) -> v8u16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(bnegi.w, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(bnegi.w, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bnegi_w(a: v4u32) -> v4u32 { @@ -2420,7 +2420,7 @@ pub unsafe fn __msa_bnegi_w(a: v4u32) -> v4u32 { /// #[inline] #[target_feature(enable = "msa")] 
-#[cfg_attr(test, assert_instr(bnegi.d, imm6 = 0b111111))] +#[cfg_attr(test, assert_instr(bnegi.d, IMM6 = 0b111111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bnegi_d(a: v2u64) -> v2u64 { @@ -2512,7 +2512,7 @@ pub unsafe fn __msa_bsel_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(bseli.b, imm8 = 0b11111111))] +#[cfg_attr(test, assert_instr(bseli.b, IMM8 = 0b11111111))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bseli_b(a: v16u8, b: v16u8) -> v16u8 { @@ -2588,7 +2588,7 @@ pub unsafe fn __msa_bset_d(a: v2u64, b: v2u64) -> v2u64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(bseti.b, imm3 = 0b111))] +#[cfg_attr(test, assert_instr(bseti.b, IMM3 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bseti_b(a: v16u8) -> v16u8 { @@ -2604,7 +2604,7 @@ pub unsafe fn __msa_bseti_b(a: v16u8) -> v16u8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(bseti.h, imm4 = 0b1111))] +#[cfg_attr(test, assert_instr(bseti.h, IMM4 = 0b1111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bseti_h(a: v8u16) -> v8u16 { @@ -2620,7 +2620,7 @@ pub unsafe fn __msa_bseti_h(a: v8u16) -> v8u16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(bseti.w, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(bseti.w, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bseti_w(a: v4u32) -> v4u32 { @@ -2636,7 +2636,7 @@ pub unsafe fn __msa_bseti_w(a: v4u32) -> v4u32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(bseti.d, imm6 = 0b111111))] +#[cfg_attr(test, 
assert_instr(bseti.d, IMM6 = 0b111111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bseti_d(a: v2u64) -> v2u64 { @@ -2769,7 +2769,7 @@ pub unsafe fn __msa_ceq_d(a: v2i64, b: v2i64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(ceqi.b, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(ceqi.b, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ceqi_b(a: v16i8) -> v16i8 { @@ -2785,7 +2785,7 @@ pub unsafe fn __msa_ceqi_b(a: v16i8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(ceqi.h, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(ceqi.h, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ceqi_h(a: v8i16) -> v8i16 { @@ -2801,7 +2801,7 @@ pub unsafe fn __msa_ceqi_h(a: v8i16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(ceqi.w, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(ceqi.w, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ceqi_w(a: v4i32) -> v4i32 { @@ -2817,7 +2817,7 @@ pub unsafe fn __msa_ceqi_w(a: v4i32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(ceqi.d, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(ceqi.d, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ceqi_d(a: v2i64) -> v2i64 { @@ -2832,7 +2832,7 @@ pub unsafe fn __msa_ceqi_d(a: v2i64) -> v2i64 { /// Can not be tested in user mode #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(cfcmsa, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(cfcmsa, IMM5 = 0b11111))] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_mips", 
issue = "111198")] pub unsafe fn __msa_cfcmsa() -> i32 { @@ -2969,7 +2969,7 @@ pub unsafe fn __msa_cle_u_d(a: v2u64, b: v2u64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(clei_s.b, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(clei_s.b, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clei_s_b(a: v16i8) -> v16i8 { @@ -2986,7 +2986,7 @@ pub unsafe fn __msa_clei_s_b(a: v16i8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(clei_s.h, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(clei_s.h, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clei_s_h(a: v8i16) -> v8i16 { @@ -3003,7 +3003,7 @@ pub unsafe fn __msa_clei_s_h(a: v8i16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(clei_s.w, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(clei_s.w, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clei_s_w(a: v4i32) -> v4i32 { @@ -3020,7 +3020,7 @@ pub unsafe fn __msa_clei_s_w(a: v4i32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(clei_s.d, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(clei_s.d, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clei_s_d(a: v2i64) -> v2i64 { @@ -3037,7 +3037,7 @@ pub unsafe fn __msa_clei_s_d(a: v2i64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(clei_u.b, imm5 = 0b111))] +#[cfg_attr(test, assert_instr(clei_u.b, IMM5 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clei_u_b(a: v16u8) -> v16i8 { @@ -3054,7 +3054,7 @@ pub unsafe fn 
__msa_clei_u_b(a: v16u8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(clei_u.h, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(clei_u.h, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clei_u_h(a: v8u16) -> v8i16 { @@ -3071,7 +3071,7 @@ pub unsafe fn __msa_clei_u_h(a: v8u16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(clei_u.w, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(clei_u.w, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clei_u_w(a: v4u32) -> v4i32 { @@ -3088,7 +3088,7 @@ pub unsafe fn __msa_clei_u_w(a: v4u32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(clei_u.d, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(clei_u.d, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clei_u_d(a: v2u64) -> v2i64 { @@ -3225,7 +3225,7 @@ pub unsafe fn __msa_clt_u_d(a: v2u64, b: v2u64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(clti_s.b, imm_s5 = 0b111))] +#[cfg_attr(test, assert_instr(clti_s.b, IMM_S5 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clti_s_b(a: v16i8) -> v16i8 { @@ -3242,7 +3242,7 @@ pub unsafe fn __msa_clti_s_b(a: v16i8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(clti_s.h, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(clti_s.h, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clti_s_h(a: v8i16) -> v8i16 { @@ -3259,7 +3259,7 @@ pub unsafe fn __msa_clti_s_h(a: v8i16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, 
assert_instr(clti_s.w, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(clti_s.w, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clti_s_w(a: v4i32) -> v4i32 { @@ -3276,7 +3276,7 @@ pub unsafe fn __msa_clti_s_w(a: v4i32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(clti_s.d, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(clti_s.d, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clti_s_d(a: v2i64) -> v2i64 { @@ -3293,7 +3293,7 @@ pub unsafe fn __msa_clti_s_d(a: v2i64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(clti_u.b, imm5 = 0b111))] +#[cfg_attr(test, assert_instr(clti_u.b, IMM5 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clti_u_b(a: v16u8) -> v16i8 { @@ -3310,7 +3310,7 @@ pub unsafe fn __msa_clti_u_b(a: v16u8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(clti_u.h, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(clti_u.h, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clti_u_h(a: v8u16) -> v8i16 { @@ -3327,7 +3327,7 @@ pub unsafe fn __msa_clti_u_h(a: v8u16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(clti_u.w, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(clti_u.w, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clti_u_w(a: v4u32) -> v4i32 { @@ -3344,7 +3344,7 @@ pub unsafe fn __msa_clti_u_w(a: v4u32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(clti_u.d, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(clti_u.d, IMM5 = 0b11111))] 
#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clti_u_d(a: v2u64) -> v2i64 { @@ -3359,7 +3359,7 @@ pub unsafe fn __msa_clti_u_d(a: v2u64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(copy_s.b, imm4 = 0b1111))] +#[cfg_attr(test, assert_instr(copy_s.b, IMM4 = 0b1111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_copy_s_b(a: v16i8) -> i32 { @@ -3374,7 +3374,7 @@ pub unsafe fn __msa_copy_s_b(a: v16i8) -> i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(copy_s.h, imm3 = 0b111))] +#[cfg_attr(test, assert_instr(copy_s.h, IMM3 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_copy_s_h(a: v8i16) -> i32 { @@ -3389,7 +3389,7 @@ pub unsafe fn __msa_copy_s_h(a: v8i16) -> i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(copy_s.w, imm2 = 0b11))] +#[cfg_attr(test, assert_instr(copy_s.w, IMM2 = 0b11))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_copy_s_w(a: v4i32) -> i32 { @@ -3404,7 +3404,7 @@ pub unsafe fn __msa_copy_s_w(a: v4i32) -> i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(copy_s.d, imm1 = 0b1))] +#[cfg_attr(test, assert_instr(copy_s.d, IMM1 = 0b1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_copy_s_d(a: v2i64) -> i64 { @@ -3419,7 +3419,7 @@ pub unsafe fn __msa_copy_s_d(a: v2i64) -> i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(copy_u.b, imm4 = 0b1111))] +#[cfg_attr(test, assert_instr(copy_u.b, IMM4 = 0b1111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_copy_u_b(a: v16i8) -> u32 { @@ 
-3434,7 +3434,7 @@ pub unsafe fn __msa_copy_u_b(a: v16i8) -> u32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(copy_u.h, imm3 = 0b111))] +#[cfg_attr(test, assert_instr(copy_u.h, IMM3 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_copy_u_h(a: v8i16) -> u32 { @@ -3449,7 +3449,7 @@ pub unsafe fn __msa_copy_u_h(a: v8i16) -> u32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(copy_u.w, imm2 = 0b11))] +#[cfg_attr(test, assert_instr(copy_u.w, IMM2 = 0b11))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_copy_u_w(a: v4i32) -> u32 { @@ -3464,7 +3464,7 @@ pub unsafe fn __msa_copy_u_w(a: v4i32) -> u32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(copy_u.d, imm1 = 0b1))] +#[cfg_attr(test, assert_instr(copy_u.d, IMM1 = 0b1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_copy_u_d(a: v2i64) -> u64 { @@ -3481,7 +3481,7 @@ pub unsafe fn __msa_copy_u_d(a: v2i64) -> u64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(ctcmsa, imm1 = 0b1))] +#[cfg_attr(test, assert_instr(ctcmsa, IMM5 = 0b1))] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ctcmsa(a: i32) -> () { @@ -5855,7 +5855,7 @@ pub unsafe fn __msa_ilvr_d(a: v2i64, b: v2i64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(insert.b, imm4 = 0b1111))] +#[cfg_attr(test, assert_instr(insert.b, IMM4 = 0b1111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_insert_b(a: v16i8, c: i32) -> v16i8 { @@ -5871,7 +5871,7 @@ pub unsafe fn __msa_insert_b(a: v16i8, c: i32) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, 
assert_instr(insert.h, imm3 = 0b111))] +#[cfg_attr(test, assert_instr(insert.h, IMM3 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_insert_h(a: v8i16, c: i32) -> v8i16 { @@ -5887,7 +5887,7 @@ pub unsafe fn __msa_insert_h(a: v8i16, c: i32) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(insert.w, imm2 = 0b11))] +#[cfg_attr(test, assert_instr(insert.w, IMM2 = 0b11))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_insert_w(a: v4i32, c: i32) -> v4i32 { @@ -5903,7 +5903,7 @@ pub unsafe fn __msa_insert_w(a: v4i32, c: i32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(insert.d, imm1 = 0b1))] +#[cfg_attr(test, assert_instr(insert.d, IMM1 = 0b1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_insert_d(a: v2i64, c: i64) -> v2i64 { @@ -5919,7 +5919,7 @@ pub unsafe fn __msa_insert_d(a: v2i64, c: i64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(insve.b, imm4 = 0b1111))] +#[cfg_attr(test, assert_instr(insve.b, IMM4 = 0b1111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_insve_b(a: v16i8, c: v16i8) -> v16i8 { @@ -5935,7 +5935,7 @@ pub unsafe fn __msa_insve_b(a: v16i8, c: v16i8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(insve.h, imm3 = 0b111))] +#[cfg_attr(test, assert_instr(insve.h, IMM3 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_insve_h(a: v8i16, c: v8i16) -> v8i16 { @@ -5951,7 +5951,7 @@ pub unsafe fn __msa_insve_h(a: v8i16, c: v8i16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(insve.w, imm2 = 0b11))] 
+#[cfg_attr(test, assert_instr(insve.w, IMM2 = 0b11))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_insve_w(a: v4i32, c: v4i32) -> v4i32 { @@ -5967,7 +5967,7 @@ pub unsafe fn __msa_insve_w(a: v4i32, c: v4i32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(insve.d, imm1 = 0b1))] +#[cfg_attr(test, assert_instr(insve.d, IMM1 = 0b1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_insve_d(a: v2i64, c: v2i64) -> v2i64 { @@ -5983,7 +5983,7 @@ pub unsafe fn __msa_insve_d(a: v2i64, c: v2i64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(ld.b, imm_s10 = 0b1111111111))] +#[cfg_attr(test, assert_instr(ld.b, IMM_S10 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ld_b(mem_addr: *mut u8) -> v16i8 { @@ -5999,7 +5999,7 @@ pub unsafe fn __msa_ld_b(mem_addr: *mut u8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(ld.h, imm_s11 = 0b11111111111))] +#[cfg_attr(test, assert_instr(ld.h, IMM_S11 = -2))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ld_h(mem_addr: *mut u8) -> v8i16 { @@ -6016,7 +6016,7 @@ pub unsafe fn __msa_ld_h(mem_addr: *mut u8) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(ld.w, imm_s12 = 0b111111111111))] +#[cfg_attr(test, assert_instr(ld.w, IMM_S12 = -4))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ld_w(mem_addr: *mut u8) -> v4i32 { @@ -6033,7 +6033,7 @@ pub unsafe fn __msa_ld_w(mem_addr: *mut u8) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(ld.d, imm_s13 = 0b1111111111111))] +#[cfg_attr(test, assert_instr(ld.d, IMM_S13 = -8))] 
#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ld_d(mem_addr: *mut u8) -> v2i64 { @@ -6050,7 +6050,7 @@ pub unsafe fn __msa_ld_d(mem_addr: *mut u8) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(ldi.b, imm_s10 = 0b1111111111))] +#[cfg_attr(test, assert_instr(ldi.b, IMM_S10 = -1))] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ldi_b() -> v16i8 { @@ -6066,7 +6066,7 @@ pub unsafe fn __msa_ldi_b() -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(ldi.h, imm_s10 = 0b1111111111))] +#[cfg_attr(test, assert_instr(ldi.h, IMM_S10 = -1))] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ldi_h() -> v8i16 { @@ -6082,7 +6082,7 @@ pub unsafe fn __msa_ldi_h() -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(ldi.w, imm_s10 = 0b1111111111))] +#[cfg_attr(test, assert_instr(ldi.w, IMM_S10 = -1))] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ldi_w() -> v4i32 { @@ -6098,7 +6098,7 @@ pub unsafe fn __msa_ldi_w() -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(ldi.d, imm_s10 = 0b1111111111))] +#[cfg_attr(test, assert_instr(ldi.d, IMM_S10 = -1))] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ldi_d() -> v2i64 { @@ -6410,7 +6410,7 @@ pub unsafe fn __msa_max_u_d(a: v2u64, b: v2u64) -> v2u64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(maxi_s.b, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(maxi_s.b, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maxi_s_b(a: v16i8) -> v16i8 { @@ -6426,7 +6426,7 @@ pub 
unsafe fn __msa_maxi_s_b(a: v16i8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(maxi_s.h, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(maxi_s.h, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maxi_s_h(a: v8i16) -> v8i16 { @@ -6442,7 +6442,7 @@ pub unsafe fn __msa_maxi_s_h(a: v8i16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(maxi_s.w, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(maxi_s.w, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maxi_s_w(a: v4i32) -> v4i32 { @@ -6458,7 +6458,7 @@ pub unsafe fn __msa_maxi_s_w(a: v4i32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(maxi_s.d, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(maxi_s.d, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maxi_s_d(a: v2i64) -> v2i64 { @@ -6474,7 +6474,7 @@ pub unsafe fn __msa_maxi_s_d(a: v2i64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(maxi_u.b, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(maxi_u.b, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maxi_u_b(a: v16u8) -> v16u8 { @@ -6490,7 +6490,7 @@ pub unsafe fn __msa_maxi_u_b(a: v16u8) -> v16u8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(maxi_u.h, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(maxi_u.h, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maxi_u_h(a: v8u16) -> v8u16 { @@ -6506,7 +6506,7 @@ pub unsafe fn __msa_maxi_u_h(a: v8u16) -> v8u16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, 
assert_instr(maxi_u.w, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(maxi_u.w, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maxi_u_w(a: v4u32) -> v4u32 { @@ -6522,7 +6522,7 @@ pub unsafe fn __msa_maxi_u_w(a: v4u32) -> v4u32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(maxi_u.d, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(maxi_u.d, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maxi_u_d(a: v2u64) -> v2u64 { @@ -6654,7 +6654,7 @@ pub unsafe fn __msa_min_s_d(a: v2i64, b: v2i64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(mini_s.b, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(mini_s.b, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mini_s_b(a: v16i8) -> v16i8 { @@ -6670,7 +6670,7 @@ pub unsafe fn __msa_mini_s_b(a: v16i8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(mini_s.h, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(mini_s.h, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mini_s_h(a: v8i16) -> v8i16 { @@ -6686,7 +6686,7 @@ pub unsafe fn __msa_mini_s_h(a: v8i16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(mini_s.w, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(mini_s.w, IMM_S5 = -1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mini_s_w(a: v4i32) -> v4i32 { @@ -6702,7 +6702,7 @@ pub unsafe fn __msa_mini_s_w(a: v4i32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(mini_s.d, imm_s5 = 0b11111))] +#[cfg_attr(test, assert_instr(mini_s.d, IMM_S5 = -1))] 
#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mini_s_d(a: v2i64) -> v2i64 { @@ -6774,7 +6774,7 @@ pub unsafe fn __msa_min_u_d(a: v2u64, b: v2u64) -> v2u64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(mini_u.b, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(mini_u.b, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mini_u_b(a: v16u8) -> v16u8 { @@ -6790,7 +6790,7 @@ pub unsafe fn __msa_mini_u_b(a: v16u8) -> v16u8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(mini_u.h, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(mini_u.h, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mini_u_h(a: v8u16) -> v8u16 { @@ -6806,7 +6806,7 @@ pub unsafe fn __msa_mini_u_h(a: v8u16) -> v8u16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(mini_u.w, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(mini_u.w, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mini_u_w(a: v4u32) -> v4u32 { @@ -6822,7 +6822,7 @@ pub unsafe fn __msa_mini_u_w(a: v4u32) -> v4u32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(mini_u.d, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(mini_u.d, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mini_u_d(a: v2u64) -> v2u64 { @@ -7343,7 +7343,7 @@ pub unsafe fn __msa_nor_v(a: v16u8, b: v16u8) -> v16u8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(nori.b, imm8 = 0b11111111))] +#[cfg_attr(test, assert_instr(nori.b, IMM8 = 0b11111111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] 
pub unsafe fn __msa_nori_b(a: v16u8) -> v16u8 { @@ -7375,7 +7375,7 @@ pub unsafe fn __msa_or_v(a: v16u8, b: v16u8) -> v16u8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(ori.b, imm8 = 0b11111111))] +#[cfg_attr(test, assert_instr(ori.b, IMM8 = 0b11111111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ori_b(a: v16u8) -> v16u8 { @@ -7555,7 +7555,7 @@ pub unsafe fn __msa_pcnt_d(a: v2i64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(sat_s.b, imm4 = 0b111))] +#[cfg_attr(test, assert_instr(sat_s.b, IMM3 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sat_s_b(a: v16i8) -> v16i8 { @@ -7571,7 +7571,7 @@ pub unsafe fn __msa_sat_s_b(a: v16i8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(sat_s.h, imm3 = 0b1111))] +#[cfg_attr(test, assert_instr(sat_s.h, IMM4 = 0b1111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sat_s_h(a: v8i16) -> v8i16 { @@ -7587,7 +7587,7 @@ pub unsafe fn __msa_sat_s_h(a: v8i16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(sat_s.w, imm2 = 0b11111))] +#[cfg_attr(test, assert_instr(sat_s.w, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sat_s_w(a: v4i32) -> v4i32 { @@ -7603,7 +7603,7 @@ pub unsafe fn __msa_sat_s_w(a: v4i32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(sat_s.d, imm1 = 0b111111))] +#[cfg_attr(test, assert_instr(sat_s.d, IMM6 = 0b111111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sat_s_d(a: v2i64) -> v2i64 { @@ -7619,7 +7619,7 @@ pub unsafe fn __msa_sat_s_d(a: v2i64) -> v2i64 { /// 
#[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(sat_u.b, imm4 = 0b111))] +#[cfg_attr(test, assert_instr(sat_u.b, IMM3 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sat_u_b(a: v16u8) -> v16u8 { @@ -7635,7 +7635,7 @@ pub unsafe fn __msa_sat_u_b(a: v16u8) -> v16u8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(sat_u.h, imm3 = 0b1111))] +#[cfg_attr(test, assert_instr(sat_u.h, IMM4 = 0b1111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sat_u_h(a: v8u16) -> v8u16 { @@ -7651,7 +7651,7 @@ pub unsafe fn __msa_sat_u_h(a: v8u16) -> v8u16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(sat_u.w, imm2 = 0b11111))] +#[cfg_attr(test, assert_instr(sat_u.w, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sat_u_w(a: v4u32) -> v4u32 { @@ -7667,7 +7667,7 @@ pub unsafe fn __msa_sat_u_w(a: v4u32) -> v4u32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(sat_u.d, imm1 = 0b111111))] +#[cfg_attr(test, assert_instr(sat_u.d, IMM6 = 0b111111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sat_u_d(a: v2u64) -> v2u64 { @@ -7684,7 +7684,7 @@ pub unsafe fn __msa_sat_u_d(a: v2u64) -> v2u64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(shf.b, imm8 = 0b11111111))] +#[cfg_attr(test, assert_instr(shf.b, IMM8 = 0b11111111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_shf_b(a: v16i8) -> v16i8 { @@ -7701,7 +7701,7 @@ pub unsafe fn __msa_shf_b(a: v16i8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(shf.h, imm8 = 0b11111111))] +#[cfg_attr(test, 
assert_instr(shf.h, IMM8 = 0b11111111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_shf_h(a: v8i16) -> v8i16 { @@ -7718,7 +7718,7 @@ pub unsafe fn __msa_shf_h(a: v8i16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(shf.w, imm8 = 0b11111111))] +#[cfg_attr(test, assert_instr(shf.w, IMM8 = 0b11111111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_shf_w(a: v4i32) -> v4i32 { @@ -7823,7 +7823,7 @@ pub unsafe fn __msa_sld_d(a: v2i64, b: v2i64, c: i32) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(sldi.b, imm4 = 0b1111))] +#[cfg_attr(test, assert_instr(sldi.b, IMM4 = 0b1111))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sldi_b(a: v16i8, b: v16i8) -> v16i8 { @@ -7844,7 +7844,7 @@ pub unsafe fn __msa_sldi_b(a: v16i8, b: v16i8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(sldi.h, imm3 = 0b111))] +#[cfg_attr(test, assert_instr(sldi.h, IMM3 = 0b111))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sldi_h(a: v8i16, b: v8i16) -> v8i16 { @@ -7865,7 +7865,7 @@ pub unsafe fn __msa_sldi_h(a: v8i16, b: v8i16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(sldi.w, imm2 = 0b11))] +#[cfg_attr(test, assert_instr(sldi.w, IMM2 = 0b11))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sldi_w(a: v4i32, b: v4i32) -> v4i32 { @@ -7886,7 +7886,7 @@ pub unsafe fn __msa_sldi_w(a: v4i32, b: v4i32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(sldi.d, imm1 = 0b1))] +#[cfg_attr(test, assert_instr(sldi.d, IMM1 = 0b1))] #[rustc_legacy_const_generics(2)] 
#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sldi_d(a: v2i64, b: v2i64) -> v2i64 { @@ -7962,7 +7962,7 @@ pub unsafe fn __msa_sll_d(a: v2i64, b: v2i64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(slli.b, imm4 = 0b1111))] +#[cfg_attr(test, assert_instr(slli.b, IMM4 = 0b1111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_slli_b(a: v16i8) -> v16i8 { @@ -7978,7 +7978,7 @@ pub unsafe fn __msa_slli_b(a: v16i8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(slli.h, imm3 = 0b111))] +#[cfg_attr(test, assert_instr(slli.h, IMM3 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_slli_h(a: v8i16) -> v8i16 { @@ -7994,7 +7994,7 @@ pub unsafe fn __msa_slli_h(a: v8i16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(slli.w, imm2 = 0b11))] +#[cfg_attr(test, assert_instr(slli.w, IMM2 = 0b11))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_slli_w(a: v4i32) -> v4i32 { @@ -8010,7 +8010,7 @@ pub unsafe fn __msa_slli_w(a: v4i32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(slli.d, imm1 = 0b1))] +#[cfg_attr(test, assert_instr(slli.d, IMM1 = 0b1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_slli_d(a: v2i64) -> v2i64 { @@ -8085,7 +8085,7 @@ pub unsafe fn __msa_splat_d(a: v2i64, b: i32) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(splati.b, imm4 = 0b1111))] +#[cfg_attr(test, assert_instr(splati.b, IMM4 = 0b1111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_splati_b(a: v16i8) -> v16i8 { @@ -8100,7 +8100,7 @@ pub unsafe 
fn __msa_splati_b(a: v16i8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(splati.h, imm3 = 0b111))] +#[cfg_attr(test, assert_instr(splati.h, IMM3 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_splati_h(a: v8i16) -> v8i16 { @@ -8115,7 +8115,7 @@ pub unsafe fn __msa_splati_h(a: v8i16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(splati.w, imm2 = 0b11))] +#[cfg_attr(test, assert_instr(splati.w, IMM2 = 0b11))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_splati_w(a: v4i32) -> v4i32 { @@ -8130,7 +8130,7 @@ pub unsafe fn __msa_splati_w(a: v4i32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(splati.d, imm1 = 0b1))] +#[cfg_attr(test, assert_instr(splati.d, IMM1 = 0b1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_splati_d(a: v2i64) -> v2i64 { @@ -8206,7 +8206,7 @@ pub unsafe fn __msa_sra_d(a: v2i64, b: v2i64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(srai.b, imm3 = 0b111))] +#[cfg_attr(test, assert_instr(srai.b, IMM3 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srai_b(a: v16i8) -> v16i8 { @@ -8222,7 +8222,7 @@ pub unsafe fn __msa_srai_b(a: v16i8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(srai.h, imm4 = 0b1111))] +#[cfg_attr(test, assert_instr(srai.h, IMM4 = 0b1111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srai_h(a: v8i16) -> v8i16 { @@ -8238,7 +8238,7 @@ pub unsafe fn __msa_srai_h(a: v8i16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(srai.w, imm5 = 
0b11111))] +#[cfg_attr(test, assert_instr(srai.w, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srai_w(a: v4i32) -> v4i32 { @@ -8254,7 +8254,7 @@ pub unsafe fn __msa_srai_w(a: v4i32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(srai.d, imm6 = 0b111111))] +#[cfg_attr(test, assert_instr(srai.d, IMM6 = 0b111111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srai_d(a: v2i64) -> v2i64 { @@ -8335,7 +8335,7 @@ pub unsafe fn __msa_srar_d(a: v2i64, b: v2i64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(srari.b, imm3 = 0b111))] +#[cfg_attr(test, assert_instr(srari.b, IMM3 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srari_b(a: v16i8) -> v16i8 { @@ -8352,7 +8352,7 @@ pub unsafe fn __msa_srari_b(a: v16i8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(srari.h, imm4 = 0b1111))] +#[cfg_attr(test, assert_instr(srari.h, IMM4 = 0b1111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srari_h(a: v8i16) -> v8i16 { @@ -8369,7 +8369,7 @@ pub unsafe fn __msa_srari_h(a: v8i16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(srari.w, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(srari.w, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srari_w(a: v4i32) -> v4i32 { @@ -8386,7 +8386,7 @@ pub unsafe fn __msa_srari_w(a: v4i32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(srari.d, imm6 = 0b111111))] +#[cfg_attr(test, assert_instr(srari.d, IMM6 = 0b111111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = 
"stdarch_mips", issue = "111198")] pub unsafe fn __msa_srari_d(a: v2i64) -> v2i64 { @@ -8462,7 +8462,7 @@ pub unsafe fn __msa_srl_d(a: v2i64, b: v2i64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(srli.b, imm4 = 0b1111))] +#[cfg_attr(test, assert_instr(srli.b, IMM4 = 0b1111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srli_b(a: v16i8) -> v16i8 { @@ -8478,7 +8478,7 @@ pub unsafe fn __msa_srli_b(a: v16i8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(srli.h, imm3 = 0b111))] +#[cfg_attr(test, assert_instr(srli.h, IMM3 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srli_h(a: v8i16) -> v8i16 { @@ -8494,7 +8494,7 @@ pub unsafe fn __msa_srli_h(a: v8i16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(srli.w, imm2 = 0b11))] +#[cfg_attr(test, assert_instr(srli.w, IMM2 = 0b11))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srli_w(a: v4i32) -> v4i32 { @@ -8510,7 +8510,7 @@ pub unsafe fn __msa_srli_w(a: v4i32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(srli.d, imm1 = 0b1))] +#[cfg_attr(test, assert_instr(srli.d, IMM1 = 0b1))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srli_d(a: v2i64) -> v2i64 { @@ -8591,7 +8591,7 @@ pub unsafe fn __msa_srlr_d(a: v2i64, b: v2i64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(srlri.b, imm3 = 0b111))] +#[cfg_attr(test, assert_instr(srlri.b, IMM3 = 0b111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srlri_b(a: v16i8) -> v16i8 { @@ -8608,7 +8608,7 @@ pub unsafe fn __msa_srlri_b(a: v16i8) -> 
v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(srlri.h, imm4 = 0b1111))] +#[cfg_attr(test, assert_instr(srlri.h, IMM4 = 0b1111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srlri_h(a: v8i16) -> v8i16 { @@ -8625,7 +8625,7 @@ pub unsafe fn __msa_srlri_h(a: v8i16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(srlri.w, imm5 = 0b11111))] +#[cfg_attr(test, assert_instr(srlri.w, IMM5 = 0b11111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srlri_w(a: v4i32) -> v4i32 { @@ -8642,7 +8642,7 @@ pub unsafe fn __msa_srlri_w(a: v4i32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(srlri.d, imm6 = 0b111111))] +#[cfg_attr(test, assert_instr(srlri.d, IMM6 = 0b111111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srlri_d(a: v2i64) -> v2i64 { @@ -8658,7 +8658,7 @@ pub unsafe fn __msa_srlri_d(a: v2i64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(st.b, imm_s10 = 0b1111111111))] +#[cfg_attr(test, assert_instr(st.b, IMM_S10 = -1))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_st_b(a: v16i8, mem_addr: *mut u8) -> () { @@ -8674,7 +8674,7 @@ pub unsafe fn __msa_st_b(a: v16i8, mem_addr: *mut u8) -> () /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(st.h, imm_s11 = 0b11111111111))] +#[cfg_attr(test, assert_instr(st.h, IMM_S11 = -2))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_st_h(a: v8i16, mem_addr: *mut u8) -> () { @@ -8691,7 +8691,7 @@ pub unsafe fn __msa_st_h(a: v8i16, mem_addr: *mut u8) -> () /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, 
assert_instr(st.w, imm_s12 = 0b111111111111))] +#[cfg_attr(test, assert_instr(st.w, IMM_S12 = -4))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_st_w(a: v4i32, mem_addr: *mut u8) -> () { @@ -8708,7 +8708,7 @@ pub unsafe fn __msa_st_w(a: v4i32, mem_addr: *mut u8) -> () /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(st.d, imm_s13 = 0b1111111111111))] +#[cfg_attr(test, assert_instr(st.d, IMM_S13 = -8))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_st_d(a: v2i64, mem_addr: *mut u8) -> () { @@ -9021,7 +9021,7 @@ pub unsafe fn __msa_subv_d(a: v2i64, b: v2i64) -> v2i64 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(subvi.b, imm5 = 0b10111))] +#[cfg_attr(test, assert_instr(subvi.b, IMM5 = 0b10111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subvi_b(a: v16i8) -> v16i8 { @@ -9037,7 +9037,7 @@ pub unsafe fn __msa_subvi_b(a: v16i8) -> v16i8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(subvi.h, imm5 = 0b10111))] +#[cfg_attr(test, assert_instr(subvi.h, IMM5 = 0b10111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subvi_h(a: v8i16) -> v8i16 { @@ -9053,7 +9053,7 @@ pub unsafe fn __msa_subvi_h(a: v8i16) -> v8i16 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(subvi.w, imm5 = 0b10111))] +#[cfg_attr(test, assert_instr(subvi.w, IMM5 = 0b10111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subvi_w(a: v4i32) -> v4i32 { @@ -9069,7 +9069,7 @@ pub unsafe fn __msa_subvi_w(a: v4i32) -> v4i32 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(subvi.d, imm5 = 0b10111))] +#[cfg_attr(test, 
assert_instr(subvi.d, IMM5 = 0b10111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subvi_d(a: v2i64) -> v2i64 { @@ -9173,7 +9173,7 @@ pub unsafe fn __msa_xor_v(a: v16u8, b: v16u8) -> v16u8 { /// #[inline] #[target_feature(enable = "msa")] -#[cfg_attr(test, assert_instr(xori.b, imm8 = 0b11111111))] +#[cfg_attr(test, assert_instr(xori.b, IMM8 = 0b11111111))] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_xori_b(a: v16u8) -> v16u8 { @@ -9602,7 +9602,7 @@ mod tests { 103, -126, 103, -126 ); - assert_eq!(r, mem::transmute(__msa_addvi_b(mem::transmute(a), 67))); + assert_eq!(r, mem::transmute(__msa_addvi_b::<3>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -9618,7 +9618,7 @@ mod tests { -32766, 3279, -97, -124 ); - assert_eq!(r, mem::transmute(__msa_addvi_h(mem::transmute(a), 67))); + assert_eq!(r, mem::transmute(__msa_addvi_h::<3>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -9628,7 +9628,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(103, -2147483646, 103, -2147483645); - assert_eq!(r, mem::transmute(__msa_addvi_w(mem::transmute(a), 67))); + assert_eq!(r, mem::transmute(__msa_addvi_w::<3>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -9638,7 +9638,7 @@ mod tests { #[rustfmt::skip] let r = i64x2::new(117, -9223372036854775791); - assert_eq!(r, mem::transmute(__msa_addvi_d(mem::transmute(a), 17))); + assert_eq!(r, mem::transmute(__msa_addvi_d::<17>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -9688,7 +9688,7 @@ mod tests { 4, 5, 4, 5 ); - assert_eq!(r, mem::transmute(__msa_andi_b(mem::transmute(a), 5))); + assert_eq!(r, mem::transmute(__msa_andi_b::<5>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -10233,7 +10233,7 @@ mod tests { 247, 147, 55, 1 ); - assert_eq!(r, mem::transmute(__msa_bclri_b(mem::transmute(a), 3))); + assert_eq!(r, 
mem::transmute(__msa_bclri_b::<3>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -10243,7 +10243,7 @@ mod tests { #[rustfmt::skip] let r = u16x8::new(107, 1155, 155, 1, 107, 1155, 155, 1); - assert_eq!(r, mem::transmute(__msa_bclri_h(mem::transmute(a), 11))); + assert_eq!(r, mem::transmute(__msa_bclri_h::<11>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -10253,7 +10253,7 @@ mod tests { #[rustfmt::skip] let r = u32x4::new(202722547, 102722547, 2722547, 1); - assert_eq!(r, mem::transmute(__msa_bclri_w(mem::transmute(a), 23))); + assert_eq!(r, mem::transmute(__msa_bclri_w::<23>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -10263,7 +10263,7 @@ mod tests { #[rustfmt::skip] let r = u64x2::new(73672157683, 11110973672157683); - assert_eq!(r, mem::transmute(__msa_bclri_d(mem::transmute(a), 37))); + assert_eq!(r, mem::transmute(__msa_bclri_d::<37>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -10408,7 +10408,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_binsli_b(mem::transmute(a), mem::transmute(b), 5)) + mem::transmute(__msa_binsli_b::<5>(mem::transmute(a), mem::transmute(b))) ); } @@ -10432,7 +10432,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_binsli_h(mem::transmute(a), mem::transmute(b), 13)) + mem::transmute(__msa_binsli_h::<13>(mem::transmute(a), mem::transmute(b))) ); } @@ -10447,7 +10447,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_binsli_w(mem::transmute(a), mem::transmute(b), 17)) + mem::transmute(__msa_binsli_w::<17>(mem::transmute(a), mem::transmute(b))) ); } @@ -10462,7 +10462,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_binsli_d(mem::transmute(a), mem::transmute(b), 48)) + mem::transmute(__msa_binsli_d::<48>(mem::transmute(a), mem::transmute(b))) ); } @@ -10608,7 +10608,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_binsri_b(mem::transmute(a), mem::transmute(b), 5)) + mem::transmute(__msa_binsri_b::<5>(mem::transmute(a), mem::transmute(b))) ); } @@ -10632,7 +10632,7 @@ mod 
tests { assert_eq!( r, - mem::transmute(__msa_binsri_h(mem::transmute(a), mem::transmute(b), 13)) + mem::transmute(__msa_binsri_h::<13>(mem::transmute(a), mem::transmute(b))) ); } @@ -10647,7 +10647,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_binsri_w(mem::transmute(a), mem::transmute(b), 17)) + mem::transmute(__msa_binsri_w::<17>(mem::transmute(a), mem::transmute(b))) ); } @@ -10662,7 +10662,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_binsri_d(mem::transmute(a), mem::transmute(b), 48)) + mem::transmute(__msa_binsri_d::<48>(mem::transmute(a), mem::transmute(b))) ); } @@ -10733,7 +10733,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_bmnzi_b(mem::transmute(a), mem::transmute(b), 7)) + mem::transmute(__msa_bmnzi_b::<7>(mem::transmute(a), mem::transmute(b))) ); } @@ -10804,7 +10804,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_bmzi_b(mem::transmute(a), mem::transmute(b), 7)) + mem::transmute(__msa_bmzi_b::<7>(mem::transmute(a), mem::transmute(b))) ); } @@ -10900,7 +10900,7 @@ mod tests { 34, 116, 111, 239 ); - assert_eq!(r, mem::transmute(__msa_bnegi_b(mem::transmute(a), 4))); + assert_eq!(r, mem::transmute(__msa_bnegi_b::<4>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -10916,7 +10916,7 @@ mod tests { 30719, 1228, 2148, 2175 ); - assert_eq!(r, mem::transmute(__msa_bnegi_h(mem::transmute(a), 11))); + assert_eq!(r, mem::transmute(__msa_bnegi_h::<11>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -10926,7 +10926,7 @@ mod tests { #[rustfmt::skip] let r = u32x4::new(16777316, 2130706431, 16777316, 2164260864); - assert_eq!(r, mem::transmute(__msa_bnegi_w(mem::transmute(a), 24))); + assert_eq!(r, mem::transmute(__msa_bnegi_w::<24>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -10936,7 +10936,7 @@ mod tests { #[rustfmt::skip] let r = u64x2::new(4398046511204, 9223376434901286912); - assert_eq!(r, mem::transmute(__msa_bnegi_d(mem::transmute(a), 42))); + assert_eq!(r, 
mem::transmute(__msa_bnegi_d::<42>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11065,7 +11065,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_bseli_b(mem::transmute(a), mem::transmute(b), 121)) + mem::transmute(__msa_bseli_b::<121>(mem::transmute(a), mem::transmute(b))) ); } @@ -11161,7 +11161,7 @@ mod tests { 255, 159, 55, 5 ); - assert_eq!(r, mem::transmute(__msa_bseti_b(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_bseti_b::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11171,7 +11171,7 @@ mod tests { #[rustfmt::skip] let r = u16x8::new(255, 159, 55, 5, 255, 159, 55, 5); - assert_eq!(r, mem::transmute(__msa_bseti_h(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_bseti_h::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11181,7 +11181,7 @@ mod tests { #[rustfmt::skip] let r = u32x4::new(255, 159, 55, 5); - assert_eq!(r, mem::transmute(__msa_bseti_w(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_bseti_w::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11191,7 +11191,7 @@ mod tests { #[rustfmt::skip] let r = u64x2::new(255, 159); - assert_eq!(r, mem::transmute(__msa_bseti_d(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_bseti_d::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11341,7 +11341,7 @@ mod tests { 0, 0, -1, 0 ); - assert_eq!(r, mem::transmute(__msa_ceqi_b(mem::transmute(a), -4))); + assert_eq!(r, mem::transmute(__msa_ceqi_b::<-4>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11354,7 +11354,7 @@ mod tests { #[rustfmt::skip] let r = i16x8::new(0, 0, 0, -1, 0, 0, 0, -1); - assert_eq!(r, mem::transmute(__msa_ceqi_h(mem::transmute(a), -11))); + assert_eq!(r, mem::transmute(__msa_ceqi_h::<-11>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11364,7 +11364,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(0, 0, -1, 0); - assert_eq!(r, mem::transmute(__msa_ceqi_w(mem::transmute(a), 5))); + assert_eq!(r, 
mem::transmute(__msa_ceqi_w::<5>(mem::transmute(a)))); } // FIXME: https://reviews.llvm.org/D59884 @@ -11551,7 +11551,7 @@ mod tests { #[rustfmt::skip] let r = i8x16::new(-1, -1, 0, -1, -1, -1, 0, -1, -1, -1, 0, -1, -1, -1, 0, -1); - assert_eq!(r, mem::transmute(__msa_clei_s_b(mem::transmute(a), -2))); + assert_eq!(r, mem::transmute(__msa_clei_s_b::<-2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11564,7 +11564,7 @@ mod tests { #[rustfmt::skip] let r = i16x8::new(0, 0, 0, -1, 0, 0, 0, -1); - assert_eq!(r, mem::transmute(__msa_clei_s_h(mem::transmute(a), -1))); + assert_eq!(r, mem::transmute(__msa_clei_s_h::<-1>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11574,7 +11574,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(0, 0, -1, 0); - assert_eq!(r, mem::transmute(__msa_clei_s_w(mem::transmute(a), 6))); + assert_eq!(r, mem::transmute(__msa_clei_s_w::<6>(mem::transmute(a)))); } // FIXME: https://reviews.llvm.org/D59884 @@ -11607,7 +11607,7 @@ mod tests { -1, 0, 0, 0 ); - assert_eq!(r, mem::transmute(__msa_clei_u_b(mem::transmute(a), 25))); + assert_eq!(r, mem::transmute(__msa_clei_u_b::<25>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11620,7 +11620,7 @@ mod tests { #[rustfmt::skip] let r = i16x8::new(-1, 0, -1, 0, -1, 0, -1, 0); - assert_eq!(r, mem::transmute(__msa_clei_u_h(mem::transmute(a), 25))); + assert_eq!(r, mem::transmute(__msa_clei_u_h::<25>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11630,7 +11630,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(-1, 0, -1, 0); - assert_eq!(r, mem::transmute(__msa_clei_u_w(mem::transmute(a), 31))); + assert_eq!(r, mem::transmute(__msa_clei_u_w::<31>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11640,7 +11640,7 @@ mod tests { #[rustfmt::skip] let r = i64x2::new(-1, 0); - assert_eq!(r, mem::transmute(__msa_clei_u_d(mem::transmute(a), 25))); + assert_eq!(r, mem::transmute(__msa_clei_u_d::<25>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11810,7 
+11810,7 @@ mod tests { 0, -1, 0, 0 ); - assert_eq!(r, mem::transmute(__msa_clti_s_b(mem::transmute(a), -5))); + assert_eq!(r, mem::transmute(__msa_clti_s_b::<-5>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11823,7 +11823,7 @@ mod tests { #[rustfmt::skip] let r = i16x8::new(-1, 0, 0, 0, -1, 0, 0, 0); - assert_eq!(r, mem::transmute(__msa_clti_s_h(mem::transmute(a), 15))); + assert_eq!(r, mem::transmute(__msa_clti_s_h::<15>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11833,7 +11833,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(-1, 0, -1, 0); - assert_eq!(r, mem::transmute(__msa_clti_s_w(mem::transmute(a), -10))); + assert_eq!(r, mem::transmute(__msa_clti_s_w::<-10>(mem::transmute(a)))); } // FIXME: https://reviews.llvm.org/D59884 @@ -11866,7 +11866,7 @@ mod tests { -1, 0, 0, 0 ); - assert_eq!(r, mem::transmute(__msa_clti_u_b(mem::transmute(a), 50))); + assert_eq!(r, mem::transmute(__msa_clti_u_b::<3>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11879,7 +11879,7 @@ mod tests { #[rustfmt::skip] let r = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - assert_eq!(r, mem::transmute(__msa_clti_u_h(mem::transmute(a), 30))); + assert_eq!(r, mem::transmute(__msa_clti_u_h::<30>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11889,7 +11889,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(0, 0, 0, 0); - assert_eq!(r, mem::transmute(__msa_clti_u_w(mem::transmute(a), 10))); + assert_eq!(r, mem::transmute(__msa_clti_u_w::<10>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11899,7 +11899,7 @@ mod tests { #[rustfmt::skip] let r = i64x2::new(-1, 0); - assert_eq!(r, mem::transmute(__msa_clti_u_d(mem::transmute(a), 10))); + assert_eq!(r, mem::transmute(__msa_clti_u_d::<10>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11914,7 +11914,7 @@ mod tests { #[rustfmt::skip] let r = -100 as i32; - assert_eq!(r, mem::transmute(__msa_copy_s_b(mem::transmute(a), 12))); + assert_eq!(r, 
mem::transmute(__msa_copy_s_b::<12>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11927,7 +11927,7 @@ mod tests { #[rustfmt::skip] let r = 32767 as i32; - assert_eq!(r, mem::transmute(__msa_copy_s_h(mem::transmute(a), 4))); + assert_eq!(r, mem::transmute(__msa_copy_s_h::<4>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11936,7 +11936,7 @@ mod tests { let a = i32x4::new(100, 2147483647, 5, -2147483647); let r = 2147483647 as i32; - assert_eq!(r, mem::transmute(__msa_copy_s_w(mem::transmute(a), 1))); + assert_eq!(r, mem::transmute(__msa_copy_s_w::<1>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11946,7 +11946,7 @@ mod tests { #[rustfmt::skip] let r = 9223372036854775807 as i64; - assert_eq!(r, mem::transmute(__msa_copy_s_d(mem::transmute(a), 1))); + assert_eq!(r, mem::transmute(__msa_copy_s_d::<1>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11961,7 +11961,7 @@ mod tests { #[rustfmt::skip] let r = 100 as u32; - assert_eq!(r, mem::transmute(__msa_copy_u_b(mem::transmute(a), 12))); + assert_eq!(r, mem::transmute(__msa_copy_u_b::<12>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11974,7 +11974,7 @@ mod tests { #[rustfmt::skip] let r = 32767 as u32; - assert_eq!(r, mem::transmute(__msa_copy_u_h(mem::transmute(a), 4))); + assert_eq!(r, mem::transmute(__msa_copy_u_h::<4>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11984,7 +11984,7 @@ mod tests { #[rustfmt::skip] let r = 2147483647 as u32; - assert_eq!(r, mem::transmute(__msa_copy_u_w(mem::transmute(a), 1))); + assert_eq!(r, mem::transmute(__msa_copy_u_w::<1>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -11994,7 +11994,7 @@ mod tests { #[rustfmt::skip] let r = 9223372036854775807 as u64; - assert_eq!(r, mem::transmute(__msa_copy_u_d(mem::transmute(a), 1))); + assert_eq!(r, mem::transmute(__msa_copy_u_d::<1>(mem::transmute(a)))); } // Can not be tested in user mode @@ -14618,7 +14618,10 @@ mod tests { 5, 127, 4, 127 ); - assert_eq!(r, 
mem::transmute(__msa_insert_b(mem::transmute(a), 12, 5))); + assert_eq!( + r, + mem::transmute(__msa_insert_b::<12>(mem::transmute(a), 5)) + ); } #[simd_test(enable = "msa")] @@ -14634,7 +14637,7 @@ mod tests { 5, 3276, 100, 11 ); - assert_eq!(r, mem::transmute(__msa_insert_h(mem::transmute(a), 4, 5))); + assert_eq!(r, mem::transmute(__msa_insert_h::<4>(mem::transmute(a), 5))); } #[simd_test(enable = "msa")] @@ -14644,7 +14647,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(100, 7, 5, -2147483647); - assert_eq!(r, mem::transmute(__msa_insert_w(mem::transmute(a), 1, 7))); + assert_eq!(r, mem::transmute(__msa_insert_w::<1>(mem::transmute(a), 7))); } #[simd_test(enable = "msa")] @@ -14654,7 +14657,10 @@ mod tests { #[rustfmt::skip] let r = i64x2::new(3, 100); - assert_eq!(r, mem::transmute(__msa_insert_d(mem::transmute(a), 1, 100))); + assert_eq!( + r, + mem::transmute(__msa_insert_d::<1>(mem::transmute(a), 100)) + ); } #[simd_test(enable = "msa")] @@ -14683,7 +14689,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_insve_b(mem::transmute(a), 12, mem::transmute(b))) + mem::transmute(__msa_insve_b::<12>(mem::transmute(a), mem::transmute(b))) ); } @@ -14707,7 +14713,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_insve_h(mem::transmute(a), 4, mem::transmute(b))) + mem::transmute(__msa_insve_h::<4>(mem::transmute(a), mem::transmute(b))) ); } @@ -14722,7 +14728,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_insve_w(mem::transmute(a), 3, mem::transmute(b))) + mem::transmute(__msa_insve_w::<3>(mem::transmute(a), mem::transmute(b))) ); } @@ -14737,7 +14743,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_insve_d(mem::transmute(a), 1, mem::transmute(b))) + mem::transmute(__msa_insve_d::<1>(mem::transmute(a), mem::transmute(b))) ); } @@ -14759,7 +14765,7 @@ mod tests { 25, 26, 27, 28 ); - assert_eq!(r, mem::transmute(__msa_ld_b(p, 9))); + assert_eq!(r, mem::transmute(__msa_ld_b::<9>(p))); } #[simd_test(enable = "msa")] @@ -14773,7 +14779,7 @@ mod 
tests { #[rustfmt::skip] let r = i16x8::new(3, 4, 5, 6, 7, 8, 9, 10); - assert_eq!(r, mem::transmute(__msa_ld_h(p, -2))); + assert_eq!(r, mem::transmute(__msa_ld_h::<-2>(p))); } #[simd_test(enable = "msa")] @@ -14784,7 +14790,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(2, 3, 4, 5); - assert_eq!(r, mem::transmute(__msa_ld_w(p, -4))); + assert_eq!(r, mem::transmute(__msa_ld_w::<-4>(p))); } #[simd_test(enable = "msa")] @@ -14795,7 +14801,7 @@ mod tests { #[rustfmt::skip] let r = i64x2::new(0, 1); - assert_eq!(r, mem::transmute(__msa_ld_d(p, -32))); + assert_eq!(r, mem::transmute(__msa_ld_d::<-32>(p))); } #[simd_test(enable = "msa")] @@ -14808,7 +14814,7 @@ mod tests { -20, -20, -20, -20 ); - assert_eq!(r, mem::transmute(__msa_ldi_b(-20))); + assert_eq!(r, mem::transmute(__msa_ldi_b::<-20>())); } #[simd_test(enable = "msa")] @@ -14819,7 +14825,7 @@ mod tests { 255, 255, 255, 255 ); - assert_eq!(r, mem::transmute(__msa_ldi_h(255))); + assert_eq!(r, mem::transmute(__msa_ldi_h::<255>())); } #[simd_test(enable = "msa")] @@ -14827,7 +14833,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(-509, -509, -509, -509); - assert_eq!(r, mem::transmute(__msa_ldi_w(-509))); + assert_eq!(r, mem::transmute(__msa_ldi_w::<-509>())); } // FIXME: https://reviews.llvm.org/D59884 @@ -15288,7 +15294,7 @@ mod tests { 1, -16, -6, 8 ); - assert_eq!(r, mem::transmute(__msa_maxi_s_b(mem::transmute(a), -16))); + assert_eq!(r, mem::transmute(__msa_maxi_s_b::<-16>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -15298,7 +15304,7 @@ mod tests { #[rustfmt::skip] let r = i16x8::new(15, 15, 15, 15, 15, 15, 15, 15); - assert_eq!(r, mem::transmute(__msa_maxi_s_h(mem::transmute(a), 15))); + assert_eq!(r, mem::transmute(__msa_maxi_s_h::<15>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -15308,7 +15314,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(1, 3, -5, -5); - assert_eq!(r, mem::transmute(__msa_maxi_s_w(mem::transmute(a), -5))); + assert_eq!(r, 
mem::transmute(__msa_maxi_s_w::<-5>(mem::transmute(a)))); } // FIXME: https://reviews.llvm.org/D59884 @@ -15341,7 +15347,7 @@ mod tests { 5, 5, 6, 8 ); - assert_eq!(r, mem::transmute(__msa_maxi_u_b(mem::transmute(a), 5))); + assert_eq!(r, mem::transmute(__msa_maxi_u_b::<5>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -15351,7 +15357,7 @@ mod tests { #[rustfmt::skip] let r = u16x8::new(5, 5, 6, 8, 5, 5, 6, 8); - assert_eq!(r, mem::transmute(__msa_maxi_u_h(mem::transmute(a), 5))); + assert_eq!(r, mem::transmute(__msa_maxi_u_h::<5>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -15361,7 +15367,7 @@ mod tests { #[rustfmt::skip] let r = u32x4::new(5, 5, 6, 8); - assert_eq!(r, mem::transmute(__msa_maxi_u_w(mem::transmute(a), 5))); + assert_eq!(r, mem::transmute(__msa_maxi_u_w::<5>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -15371,7 +15377,7 @@ mod tests { #[rustfmt::skip] let r = u64x2::new(5, 8); - assert_eq!(r, mem::transmute(__msa_maxi_u_d(mem::transmute(a), 5))); + assert_eq!(r, mem::transmute(__msa_maxi_u_d::<5>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -15541,7 +15547,7 @@ mod tests { -10, -10, -10, -10 ); - assert_eq!(r, mem::transmute(__msa_mini_s_b(mem::transmute(a), -10))); + assert_eq!(r, mem::transmute(__msa_mini_s_b::<-10>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -15551,7 +15557,7 @@ mod tests { #[rustfmt::skip] let r = i16x8::new(-3, -3, -3, -4, -3, -3, -3, -4); - assert_eq!(r, mem::transmute(__msa_mini_s_h(mem::transmute(a), -3))); + assert_eq!(r, mem::transmute(__msa_mini_s_h::<-3>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -15561,7 +15567,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(-3, -3, -3, -4); - assert_eq!(r, mem::transmute(__msa_mini_s_w(mem::transmute(a), -3))); + assert_eq!(r, mem::transmute(__msa_mini_s_w::<-3>(mem::transmute(a)))); } // FIXME: https://reviews.llvm.org/D59884 @@ -15669,7 +15675,7 @@ mod tests { 1, 3, 5, 5 ); - assert_eq!(r, 
mem::transmute(__msa_mini_u_b(mem::transmute(a), 5))); + assert_eq!(r, mem::transmute(__msa_mini_u_b::<5>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -15679,7 +15685,7 @@ mod tests { #[rustfmt::skip] let r = u16x8::new(1, 3, 5, 5, 1, 3, 5, 5); - assert_eq!(r, mem::transmute(__msa_mini_u_h(mem::transmute(a), 5))); + assert_eq!(r, mem::transmute(__msa_mini_u_h::<5>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -15689,7 +15695,7 @@ mod tests { #[rustfmt::skip] let r = u32x4::new(1, 3, 5, 5); - assert_eq!(r, mem::transmute(__msa_mini_u_w(mem::transmute(a), 5))); + assert_eq!(r, mem::transmute(__msa_mini_u_w::<5>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -15699,7 +15705,7 @@ mod tests { #[rustfmt::skip] let r = u64x2::new(1, 5); - assert_eq!(r, mem::transmute(__msa_mini_u_d(mem::transmute(a), 5))); + assert_eq!(r, mem::transmute(__msa_mini_u_d::<5>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -16393,7 +16399,7 @@ mod tests { 242, 241, 240, 235 ); - assert_eq!(r, mem::transmute(__msa_nori_b(mem::transmute(a), 4))); + assert_eq!(r, mem::transmute(__msa_nori_b::<4>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -16443,7 +16449,7 @@ mod tests { 13, 14, 15, 20 ); - assert_eq!(r, mem::transmute(__msa_ori_b(mem::transmute(a), 4))); + assert_eq!(r, mem::transmute(__msa_ori_b::<4>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -16669,7 +16675,7 @@ mod tests { 3, 3, 3, 1 ); - assert_eq!(r, mem::transmute(__msa_sat_s_b(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_sat_s_b::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -16682,7 +16688,7 @@ mod tests { #[rustfmt::skip] let r = i16x8::new(127, 127, 127, 1, 127, 127, 127, 1); - assert_eq!(r, mem::transmute(__msa_sat_s_h(mem::transmute(a), 7))); + assert_eq!(r, mem::transmute(__msa_sat_s_h::<7>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -16692,7 +16698,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(131071, 131071, 131071, 
1); - assert_eq!(r, mem::transmute(__msa_sat_s_w(mem::transmute(a), 17))); + assert_eq!(r, mem::transmute(__msa_sat_s_w::<17>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -16702,7 +16708,7 @@ mod tests { #[rustfmt::skip] let r = i64x2::new(137438953471, 1); - assert_eq!(r, mem::transmute(__msa_sat_s_d(mem::transmute(a), 37))); + assert_eq!(r, mem::transmute(__msa_sat_s_d::<37>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -16722,7 +16728,7 @@ mod tests { 7, 7, 7, 1 ); - assert_eq!(r, mem::transmute(__msa_sat_u_b(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_sat_u_b::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -16735,7 +16741,7 @@ mod tests { #[rustfmt::skip] let r = u16x8::new(255, 255, 155, 1, 255, 255, 155, 1); - assert_eq!(r, mem::transmute(__msa_sat_u_h(mem::transmute(a), 7))); + assert_eq!(r, mem::transmute(__msa_sat_u_h::<7>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -16745,7 +16751,7 @@ mod tests { #[rustfmt::skip] let r = u32x4::new(262143, 262143, 262143, 1); - assert_eq!(r, mem::transmute(__msa_sat_u_w(mem::transmute(a), 17))); + assert_eq!(r, mem::transmute(__msa_sat_u_w::<17>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -16755,7 +16761,7 @@ mod tests { #[rustfmt::skip] let r = u64x2::new(274877906943, 1); - assert_eq!(r, mem::transmute(__msa_sat_u_d(mem::transmute(a), 37))); + assert_eq!(r, mem::transmute(__msa_sat_u_d::<37>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -16775,7 +16781,7 @@ mod tests { 11, 3, 4, 12 ); - assert_eq!(r, mem::transmute(__msa_shf_b(mem::transmute(a), 120))); + assert_eq!(r, mem::transmute(__msa_shf_b::<120>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -16788,7 +16794,7 @@ mod tests { #[rustfmt::skip] let r = i16x8::new(11, 14, 12, 13, 11, 14, 12, 13); - assert_eq!(r, mem::transmute(__msa_shf_h(mem::transmute(a), 156))); + assert_eq!(r, mem::transmute(__msa_shf_h::<156>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ 
-16798,7 +16804,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(1, 3, 2, 4); - assert_eq!(r, mem::transmute(__msa_shf_w(mem::transmute(a), 216))); + assert_eq!(r, mem::transmute(__msa_shf_w::<216>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -16902,7 +16908,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_sldi_b(mem::transmute(a), mem::transmute(b), 5)) + mem::transmute(__msa_sldi_b::<5>(mem::transmute(a), mem::transmute(b))) ); } @@ -16917,7 +16923,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_sldi_h(mem::transmute(a), mem::transmute(b), 2)) + mem::transmute(__msa_sldi_h::<2>(mem::transmute(a), mem::transmute(b))) ); } @@ -16932,7 +16938,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_sldi_w(mem::transmute(a), mem::transmute(b), 4)) + mem::transmute(__msa_sldi_w::<0>(mem::transmute(a), mem::transmute(b))) ); } @@ -16947,7 +16953,7 @@ mod tests { assert_eq!( r, - mem::transmute(__msa_sldi_d(mem::transmute(a), mem::transmute(b), 2)) + mem::transmute(__msa_sldi_d::<0>(mem::transmute(a), mem::transmute(b))) ); } @@ -17043,7 +17049,7 @@ mod tests { 4, 8, 12, 16 ); - assert_eq!(r, mem::transmute(__msa_slli_b(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_slli_b::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17056,7 +17062,7 @@ mod tests { #[rustfmt::skip] let r = i16x8::new(4, 8, 12, 16, 4, 8, 12, 16); - assert_eq!(r, mem::transmute(__msa_slli_h(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_slli_h::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17066,7 +17072,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(4, 8, 12, 16); - assert_eq!(r, mem::transmute(__msa_slli_w(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_slli_w::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17076,7 +17082,7 @@ mod tests { #[rustfmt::skip] let r = i64x2::new(2, 4); - assert_eq!(r, mem::transmute(__msa_slli_d(mem::transmute(a), 1))); + assert_eq!(r, 
mem::transmute(__msa_slli_d::<1>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17149,7 +17155,7 @@ mod tests { 3, 3, 3, 3 ); - assert_eq!(r, mem::transmute(__msa_splati_b(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_splati_b::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17162,7 +17168,7 @@ mod tests { #[rustfmt::skip] let r = i16x8::new(3, 3, 3, 3, 3, 3, 3, 3); - assert_eq!(r, mem::transmute(__msa_splati_h(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_splati_h::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17172,7 +17178,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(3, 3, 3, 3); - assert_eq!(r, mem::transmute(__msa_splati_w(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_splati_w::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17182,7 +17188,7 @@ mod tests { #[rustfmt::skip] let r = i64x2::new(2, 2); - assert_eq!(r, mem::transmute(__msa_splati_d(mem::transmute(a), 1))); + assert_eq!(r, mem::transmute(__msa_splati_d::<1>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17286,7 +17292,7 @@ mod tests { 31, 31, 13, 0 ); - assert_eq!(r, mem::transmute(__msa_srai_b(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_srai_b::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17299,7 +17305,7 @@ mod tests { #[rustfmt::skip] let r = i16x8::new(8191, 31, 13, 0, 8191, 31, 13, 0); - assert_eq!(r, mem::transmute(__msa_srai_h(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_srai_h::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17308,7 +17314,7 @@ mod tests { let a = i32x4::new(i32::MAX, 125, 55, 1); let r = i32x4::new(536870911, 31, 13, 0); - assert_eq!(r, mem::transmute(__msa_srai_w(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_srai_w::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17318,7 +17324,7 @@ mod tests { #[rustfmt::skip] let r = i64x2::new(2305843009213693951, 13); - 
assert_eq!(r, mem::transmute(__msa_srai_d(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_srai_d::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17422,7 +17428,7 @@ mod tests { 31, 32, 14, 0 ); - assert_eq!(r, mem::transmute(__msa_srari_b(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_srari_b::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17432,7 +17438,7 @@ mod tests { #[rustfmt::skip] let r = i16x8::new(539, 289, 39, 0, 539, 289, 39, 0); - assert_eq!(r, mem::transmute(__msa_srari_h(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_srari_h::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17442,7 +17448,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(52777789, 27777789, 2777789, 0); - assert_eq!(r, mem::transmute(__msa_srari_w(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_srari_w::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17452,7 +17458,7 @@ mod tests { #[rustfmt::skip] let r = i64x2::new(52777777789, 27777777789); - assert_eq!(r, mem::transmute(__msa_srari_d(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_srari_d::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17553,7 +17559,7 @@ mod tests { 6, 12, 25, 31 ); - assert_eq!(r, mem::transmute(__msa_srli_b(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_srli_b::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17569,7 +17575,7 @@ mod tests { 8191, 819, 25, 31 ); - assert_eq!(r, mem::transmute(__msa_srli_h(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_srli_h::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17579,7 +17585,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(25, 536870911, 25, 536870911); - assert_eq!(r, mem::transmute(__msa_srli_w(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_srli_w::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17589,7 +17595,7 @@ mod tests { 
#[rustfmt::skip] let r = i64x2::new(50, 4611686018427387903); - assert_eq!(r, mem::transmute(__msa_srli_d(mem::transmute(a), 1))); + assert_eq!(r, mem::transmute(__msa_srli_d::<1>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17689,7 +17695,7 @@ mod tests { 6, 13, 25, 32 ); - assert_eq!(r, mem::transmute(__msa_srlri_b(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_srlri_b::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17701,7 +17707,7 @@ mod tests { ); let r = i16x8::new(8192, 819, 25, 32, 8192, 819, 25, 32); - assert_eq!(r, mem::transmute(__msa_srlri_h(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_srlri_h::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17711,7 +17717,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(25, 38, 50, 536870912); - assert_eq!(r, mem::transmute(__msa_srlri_w(mem::transmute(a), 2))); + assert_eq!(r, mem::transmute(__msa_srlri_w::<2>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17721,7 +17727,7 @@ mod tests { #[rustfmt::skip] let r = i64x2::new(50, 4611686018427387904); - assert_eq!(r, mem::transmute(__msa_srlri_d(mem::transmute(a), 1))); + assert_eq!(r, mem::transmute(__msa_srlri_d::<1>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -17747,7 +17753,7 @@ mod tests { 21, 22, 23, 24, 25, 26, 27, 28 ]; - __msa_st_b(mem::transmute(a), arr.as_mut_ptr() as *mut u8, 0); + __msa_st_b::<0>(mem::transmute(a), arr.as_mut_ptr() as *mut u8); assert_eq!(arr, r); } @@ -17758,7 +17764,7 @@ mod tests { let mut arr: [i16; 8] = [0, 0, 0, 0, 0, 0, 0, 0]; #[rustfmt::skip] let r : [i16; 8] = [13, 14, 15, 16, 17, 18, 19, 20]; - __msa_st_h(mem::transmute(a), arr.as_mut_ptr() as *mut u8, 0); + __msa_st_h::<0>(mem::transmute(a), arr.as_mut_ptr() as *mut u8); assert_eq!(arr, r); } @@ -17769,7 +17775,7 @@ mod tests { let mut arr: [i32; 4] = [0, 0, 0, 0]; #[rustfmt::skip] let r : [i32; 4] = [13, 14, 15, 16]; - __msa_st_w(mem::transmute(a), arr.as_mut_ptr() as *mut u8, 0); + 
__msa_st_w::<0>(mem::transmute(a), arr.as_mut_ptr() as *mut u8); assert_eq!(arr, r); } @@ -17780,7 +17786,7 @@ mod tests { let mut arr: [i64; 2] = [0, 0]; #[rustfmt::skip] let r : [i64; 2] = [13, 14]; - __msa_st_d(mem::transmute(a), arr.as_mut_ptr() as *mut u8, 0); + __msa_st_d::<0>(mem::transmute(a), arr.as_mut_ptr() as *mut u8); assert_eq!(arr, r); } @@ -18194,7 +18200,7 @@ mod tests { 95, 122, 45, 123 ); - assert_eq!(r, mem::transmute(__msa_subvi_b(mem::transmute(a), 5))); + assert_eq!(r, mem::transmute(__msa_subvi_b::<5>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -18210,7 +18216,7 @@ mod tests { 32762, 3271, -105, 32763 ); - assert_eq!(r, mem::transmute(__msa_subvi_h(mem::transmute(a), 5))); + assert_eq!(r, mem::transmute(__msa_subvi_h::<5>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -18220,7 +18226,7 @@ mod tests { #[rustfmt::skip] let r = i32x4::new(95, 145, 195, 2147483642); - assert_eq!(r, mem::transmute(__msa_subvi_w(mem::transmute(a), 5))); + assert_eq!(r, mem::transmute(__msa_subvi_w::<5>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -18230,7 +18236,7 @@ mod tests { #[rustfmt::skip] let r = i64x2::new(95, 9223372036854775802); - assert_eq!(r, mem::transmute(__msa_subvi_d(mem::transmute(a), 5))); + assert_eq!(r, mem::transmute(__msa_subvi_d::<5>(mem::transmute(a)))); } #[simd_test(enable = "msa")] @@ -18392,6 +18398,6 @@ mod tests { 9, 10, 11, 20 ); - assert_eq!(r, mem::transmute(__msa_xori_b(mem::transmute(a), 4))); + assert_eq!(r, mem::transmute(__msa_xori_b::<4>(mem::transmute(a)))); } } diff --git a/library/stdarch/crates/simd-test-macro/src/lib.rs b/library/stdarch/crates/simd-test-macro/src/lib.rs index 92bb40946e1ff..9219540a1065f 100644 --- a/library/stdarch/crates/simd-test-macro/src/lib.rs +++ b/library/stdarch/crates/simd-test-macro/src/lib.rs @@ -71,6 +71,7 @@ pub fn simd_test( "powerpc64" | "powerpc64le" => "is_powerpc64_feature_detected", "loongarch32" | "loongarch64" => "is_loongarch_feature_detected", 
"s390x" => "is_s390x_feature_detected", + "mips64" | "mips64el" => "is_mips64_feature_detected", t => panic!("unknown target: {t}"), }; let macro_test = Ident::new(macro_test, Span::call_site()); From 96fb37ae6013ae97ca16a73ed9cf44b8b289fa94 Mon Sep 17 00:00:00 2001 From: lms0806 Date: Wed, 15 Apr 2026 12:52:23 +0900 Subject: [PATCH 51/64] add : new UI test --- ...owck-for-loop-deref-pattern-assignment.stderr | 5 +++-- .../borrowck_for_loop_pattern_assignment.rs | 9 +++++++++ .../borrowck_for_loop_pattern_assignment.stderr | 16 ++++++++++++++++ 3 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 tests/ui/borrowck/borrowck_for_loop_pattern_assignment.rs create mode 100644 tests/ui/borrowck/borrowck_for_loop_pattern_assignment.stderr diff --git a/tests/ui/borrowck/borrowck-for-loop-deref-pattern-assignment.stderr b/tests/ui/borrowck/borrowck-for-loop-deref-pattern-assignment.stderr index fa230134df555..3c4d0e966136d 100644 --- a/tests/ui/borrowck/borrowck-for-loop-deref-pattern-assignment.stderr +++ b/tests/ui/borrowck/borrowck-for-loop-deref-pattern-assignment.stderr @@ -8,8 +8,9 @@ LL | num *= 2; | help: consider making this binding mutable | -LL | for &(mut num) num in nums { - | +++++++++ +LL - for &num in nums { +LL + for &(mut num) in nums { + | error: aborting due to 1 previous error diff --git a/tests/ui/borrowck/borrowck_for_loop_pattern_assignment.rs b/tests/ui/borrowck/borrowck_for_loop_pattern_assignment.rs new file mode 100644 index 0000000000000..93cbea820861d --- /dev/null +++ b/tests/ui/borrowck/borrowck_for_loop_pattern_assignment.rs @@ -0,0 +1,9 @@ +//! 
regression test for + +fn main() { + let nums: [u32; 3] = [1, 2, 3]; + for num in nums { + num *= 2; //~ ERROR cannot assign twice to immutable variable `num` + println!("{num}"); + } +} diff --git a/tests/ui/borrowck/borrowck_for_loop_pattern_assignment.stderr b/tests/ui/borrowck/borrowck_for_loop_pattern_assignment.stderr new file mode 100644 index 0000000000000..1dffe2b5e6436 --- /dev/null +++ b/tests/ui/borrowck/borrowck_for_loop_pattern_assignment.stderr @@ -0,0 +1,16 @@ +error[E0384]: cannot assign twice to immutable variable `num` + --> $DIR/borrowck_for_loop_pattern_assignment.rs:6:9 + | +LL | for num in nums { + | --- first assignment to `num` +LL | num *= 2; + | ^^^^^^^^ cannot assign twice to immutable variable + | +help: consider making this binding mutable + | +LL | for mut num in nums { + | +++ + +error: aborting due to 1 previous error + +For more information about this error, try `rustc --explain E0384`. From be3f77c7fffa997849938f5cb4cd1a2db3598f0e Mon Sep 17 00:00:00 2001 From: lms0806 Date: Wed, 15 Apr 2026 13:23:08 +0900 Subject: [PATCH 52/64] resolve : addressing incorrect recommendation methods --- .../src/diagnostics/conflict_errors.rs | 73 ++++++++++++++++--- 1 file changed, 62 insertions(+), 11 deletions(-) diff --git a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs index b8eefa4dd0714..07331a99b1752 100644 --- a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs +++ b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs @@ -4022,23 +4022,74 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { if let Some(decl) = local_decl && decl.can_be_made_mutable() { - let is_for_loop = matches!( - decl.local_info(), - LocalInfo::User(BindingForm::Var(VarBindingForm { - opt_match_place: Some((_, match_span)), - .. 
- })) if matches!(match_span.desugaring_kind(), Some(DesugaringKind::ForLoop)) - ); - let message = if is_for_loop + let mut is_for_loop = false; + let mut is_ref_pattern = false; + if let LocalInfo::User(BindingForm::Var(VarBindingForm { + opt_match_place: Some((_, match_span)), + .. + })) = *decl.local_info() + { + if matches!(match_span.desugaring_kind(), Some(DesugaringKind::ForLoop)) { + is_for_loop = true; + + if let Some(body) = self.infcx.tcx.hir_maybe_body_owned_by(self.mir_def_id()) { + struct RefPatternFinder<'tcx> { + tcx: TyCtxt<'tcx>, + binding_span: Span, + is_ref_pattern: bool, + } + + impl<'tcx> Visitor<'tcx> for RefPatternFinder<'tcx> { + type NestedFilter = OnlyBodies; + + fn maybe_tcx(&mut self) -> Self::MaybeTyCtxt { + self.tcx + } + + fn visit_pat(&mut self, pat: &'tcx hir::Pat<'tcx>) { + if !self.is_ref_pattern + && let hir::PatKind::Binding(_, _, ident, _) = pat.kind + && ident.span == self.binding_span + { + self.is_ref_pattern = + self.tcx.hir_parent_iter(pat.hir_id).any(|(_, node)| { + matches!( + node, + hir::Node::Pat(hir::Pat { + kind: hir::PatKind::Ref(..), + .. 
+ }) + ) + }); + } + hir::intravisit::walk_pat(self, pat); + } + } + + let mut finder = RefPatternFinder { + tcx: self.infcx.tcx, + binding_span: decl.source_info.span, + is_ref_pattern: false, + }; + + finder.visit_body(body); + is_ref_pattern = finder.is_ref_pattern; + } + } + } + + let (span, message) = if is_for_loop + && is_ref_pattern && let Ok(binding_name) = self.infcx.tcx.sess.source_map().span_to_snippet(decl.source_info.span) { - format!("(mut {}) ", binding_name) + (decl.source_info.span, format!("(mut {})", binding_name)) } else { - "mut ".to_string() + (decl.source_info.span.shrink_to_lo(), "mut ".to_string()) }; + err.span_suggestion_verbose( - decl.source_info.span.shrink_to_lo(), + span, "consider making this binding mutable", message, Applicability::MachineApplicable, From 6236ddec5a47c2dc05dd98e9373ed6ca7d42d850 Mon Sep 17 00:00:00 2001 From: Jonathan Brouwer Date: Sat, 4 Apr 2026 14:49:19 +0200 Subject: [PATCH 53/64] Remove AttributeSafety from BUILTIN_ATTRIBUTES --- .../rustc_attr_parsing/src/attributes/cfg.rs | 2 + .../src/attributes/cfg_select.rs | 2 + .../src/attributes/codegen_attrs.rs | 6 + .../src/attributes/link_attrs.rs | 5 + .../rustc_attr_parsing/src/attributes/mod.rs | 20 + compiler/rustc_attr_parsing/src/context.rs | 4 +- compiler/rustc_attr_parsing/src/interface.rs | 35 +- compiler/rustc_attr_parsing/src/lib.rs | 1 + compiler/rustc_attr_parsing/src/safety.rs | 24 +- compiler/rustc_builtin_macros/src/cfg.rs | 4 +- compiler/rustc_expand/src/config.rs | 5 +- compiler/rustc_expand/src/expand.rs | 5 +- compiler/rustc_feature/src/builtin_attrs.rs | 467 ++++-------------- compiler/rustc_feature/src/lib.rs | 2 +- 14 files changed, 181 insertions(+), 401 deletions(-) diff --git a/compiler/rustc_attr_parsing/src/attributes/cfg.rs b/compiler/rustc_attr_parsing/src/attributes/cfg.rs index 6410d0c0cf702..84c83be8b4a5d 100644 --- a/compiler/rustc_attr_parsing/src/attributes/cfg.rs +++ b/compiler/rustc_attr_parsing/src/attributes/cfg.rs @@ -19,6 
+19,7 @@ use rustc_session::parse::{ParseSess, feature_err}; use rustc_span::{ErrorGuaranteed, Span, Symbol, sym}; use thin_vec::ThinVec; +use crate::attributes::AttributeSafety; use crate::context::{AcceptContext, ShouldEmit, Stage}; use crate::parser::{ AllowExprMetavar, ArgParser, MetaItemListParser, MetaItemOrLitParser, NameValueParser, @@ -410,6 +411,7 @@ fn parse_cfg_attr_internal<'a>( attribute.style, AttrPath { segments: attribute.path().into_boxed_slice(), span: attribute.span }, Some(attribute.get_normal_item().unsafety), + AttributeSafety::Normal, ParsedDescription::Attribute, pred_span, lint_node_id, diff --git a/compiler/rustc_attr_parsing/src/attributes/cfg_select.rs b/compiler/rustc_attr_parsing/src/attributes/cfg_select.rs index 4ff224006ca89..918fd0a4582b7 100644 --- a/compiler/rustc_attr_parsing/src/attributes/cfg_select.rs +++ b/compiler/rustc_attr_parsing/src/attributes/cfg_select.rs @@ -12,6 +12,7 @@ use rustc_session::Session; use rustc_session::lint::builtin::UNREACHABLE_CFG_SELECT_PREDICATES; use rustc_span::{ErrorGuaranteed, Span, Symbol, sym}; +use crate::attributes::AttributeSafety; use crate::parser::{AllowExprMetavar, MetaItemOrLitParser}; use crate::{AttributeParser, ParsedDescription, ShouldEmit, errors, parse_cfg_entry}; @@ -105,6 +106,7 @@ pub fn parse_cfg_select( AttrStyle::Inner, AttrPath { segments: vec![sym::cfg_select].into_boxed_slice(), span: cfg_span }, None, + AttributeSafety::Normal, ParsedDescription::Macro, cfg_span, lint_node_id, diff --git a/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs b/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs index 73b2727fdab0a..53d02d09bb514 100644 --- a/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs +++ b/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs @@ -1,7 +1,9 @@ use rustc_hir::attrs::{CoverageAttrKind, OptimizeAttr, RtsanSetting, SanitizerSet, UsedBy}; use rustc_session::parse::feature_err; +use 
rustc_span::edition::Edition::Edition2024; use super::prelude::*; +use crate::attributes::AttributeSafety; use crate::session_diagnostics::{ NakedFunctionIncompatibleAttribute, NullOnExport, NullOnObjcClass, NullOnObjcSelector, ObjcClassExpectedStringLiteral, ObjcSelectorExpectedStringLiteral, @@ -103,6 +105,7 @@ pub(crate) struct ExportNameParser; impl SingleAttributeParser for ExportNameParser { const PATH: &[rustc_span::Symbol] = &[sym::export_name]; const ON_DUPLICATE: OnDuplicate = OnDuplicate::WarnButFutureError; + const SAFETY: AttributeSafety = AttributeSafety::Unsafe { unsafe_since: Some(Edition2024) }; const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[ Allow(Target::Static), Allow(Target::Fn), @@ -220,6 +223,7 @@ impl AttributeParser for NakedParser { this.span = Some(cx.attr_span); } })]; + const SAFETY: AttributeSafety = AttributeSafety::Unsafe { unsafe_since: None }; const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[ Allow(Target::Fn), Allow(Target::Method(MethodKind::Inherent)), @@ -340,6 +344,7 @@ pub(crate) struct NoMangleParser; impl NoArgsAttributeParser for NoMangleParser { const PATH: &[Symbol] = &[sym::no_mangle]; const ON_DUPLICATE: OnDuplicate = OnDuplicate::Warn; + const SAFETY: AttributeSafety = AttributeSafety::Unsafe { unsafe_since: Some(Edition2024) }; const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowListWarnRest(&[ Allow(Target::Fn), Allow(Target::Static), @@ -542,6 +547,7 @@ pub(crate) struct ForceTargetFeatureParser; impl CombineAttributeParser for ForceTargetFeatureParser { type Item = (Symbol, Span); const PATH: &[Symbol] = &[sym::force_target_feature]; + const SAFETY: AttributeSafety = AttributeSafety::Unsafe { unsafe_since: None }; const CONVERT: ConvertFn = |items, span| AttributeKind::TargetFeature { features: items, attr_span: span, diff --git a/compiler/rustc_attr_parsing/src/attributes/link_attrs.rs b/compiler/rustc_attr_parsing/src/attributes/link_attrs.rs index 
8aa7759daa043..b6ba7f9e21d49 100644 --- a/compiler/rustc_attr_parsing/src/attributes/link_attrs.rs +++ b/compiler/rustc_attr_parsing/src/attributes/link_attrs.rs @@ -5,11 +5,13 @@ use rustc_hir::attrs::*; use rustc_session::Session; use rustc_session::lint::builtin::ILL_FORMED_ATTRIBUTE_INPUT; use rustc_session::parse::feature_err; +use rustc_span::edition::Edition::Edition2024; use rustc_span::kw; use rustc_target::spec::{Arch, BinaryFormat}; use super::prelude::*; use super::util::parse_single_integer; +use crate::attributes::AttributeSafety; use crate::attributes::cfg::parse_cfg_entry; use crate::session_diagnostics::{ AsNeededCompatibility, BundleNeedsStatic, EmptyLinkName, ExportSymbolsNeedsStatic, @@ -463,6 +465,7 @@ pub(crate) struct LinkSectionParser; impl SingleAttributeParser for LinkSectionParser { const PATH: &[Symbol] = &[sym::link_section]; const ON_DUPLICATE: OnDuplicate = OnDuplicate::WarnButFutureError; + const SAFETY: AttributeSafety = AttributeSafety::Unsafe { unsafe_since: Some(Edition2024) }; const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowListWarnRest(&[ Allow(Target::Static), Allow(Target::Fn), @@ -508,6 +511,7 @@ pub(crate) struct FfiConstParser; impl NoArgsAttributeParser for FfiConstParser { const PATH: &[Symbol] = &[sym::ffi_const]; const ON_DUPLICATE: OnDuplicate = OnDuplicate::Warn; + const SAFETY: AttributeSafety = AttributeSafety::Unsafe { unsafe_since: None }; const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::ForeignFn)]); const CREATE: fn(Span) -> AttributeKind = AttributeKind::FfiConst; } @@ -516,6 +520,7 @@ pub(crate) struct FfiPureParser; impl NoArgsAttributeParser for FfiPureParser { const PATH: &[Symbol] = &[sym::ffi_pure]; const ON_DUPLICATE: OnDuplicate = OnDuplicate::Warn; + const SAFETY: AttributeSafety = AttributeSafety::Unsafe { unsafe_since: None }; const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::ForeignFn)]); const CREATE: fn(Span) -> 
AttributeKind = AttributeKind::FfiPure; } diff --git a/compiler/rustc_attr_parsing/src/attributes/mod.rs b/compiler/rustc_attr_parsing/src/attributes/mod.rs index d7f64ff2319a9..ad5a541d3a25d 100644 --- a/compiler/rustc_attr_parsing/src/attributes/mod.rs +++ b/compiler/rustc_attr_parsing/src/attributes/mod.rs @@ -18,6 +18,7 @@ use std::marker::PhantomData; use rustc_feature::{AttributeTemplate, template}; use rustc_hir::attrs::AttributeKind; +use rustc_span::edition::Edition; use rustc_span::{Span, Symbol}; use thin_vec::ThinVec; @@ -97,6 +98,7 @@ pub(crate) trait AttributeParser: Default + 'static { /// If an attribute has this symbol, the `accept` function will be called on it. const ATTRIBUTES: AcceptMapping; const ALLOWED_TARGETS: AllowedTargets; + const SAFETY: AttributeSafety = AttributeSafety::Normal; /// The parser has gotten a chance to accept the attributes on an item, /// here it can produce an attribute. @@ -127,6 +129,7 @@ pub(crate) trait SingleAttributeParser: 'static { /// Configures what to do when when the same attribute is /// applied more than once on the same syntax node. const ON_DUPLICATE: OnDuplicate; + const SAFETY: AttributeSafety = AttributeSafety::Normal; const ALLOWED_TARGETS: AllowedTargets; @@ -165,6 +168,7 @@ impl, S: Stage> AttributeParser for Single }, )]; const ALLOWED_TARGETS: AllowedTargets = T::ALLOWED_TARGETS; + const SAFETY: AttributeSafety = T::SAFETY; fn finalize(self, _cx: &FinalizeContext<'_, '_, S>) -> Option { Some(self.1?.0) @@ -217,6 +221,18 @@ impl OnDuplicate { } } +#[derive(Copy, Clone, PartialEq, Debug)] +pub enum AttributeSafety { + /// Normal attribute that does not need `#[unsafe(...)]` + Normal, + /// Unsafe attribute that requires safety obligations to be discharged. + /// + /// An error is emitted when `#[unsafe(...)]` is omitted, except when the attribute's edition + /// is less than the one stored in `unsafe_since`. 
This handles attributes that were safe in + /// earlier editions, but become unsafe in later ones. + Unsafe { unsafe_since: Option }, +} + /// An even simpler version of [`SingleAttributeParser`]: /// now automatically check that there are no arguments provided to the attribute. /// @@ -226,6 +242,7 @@ pub(crate) trait NoArgsAttributeParser: 'static { const PATH: &[Symbol]; const ON_DUPLICATE: OnDuplicate; const ALLOWED_TARGETS: AllowedTargets; + const SAFETY: AttributeSafety = AttributeSafety::Normal; /// Create the [`AttributeKind`] given attribute's [`Span`]. const CREATE: fn(Span) -> AttributeKind; @@ -242,6 +259,7 @@ impl, S: Stage> Default for WithoutArgs { impl, S: Stage> SingleAttributeParser for WithoutArgs { const PATH: &[Symbol] = T::PATH; const ON_DUPLICATE: OnDuplicate = T::ON_DUPLICATE; + const SAFETY: AttributeSafety = T::SAFETY; const ALLOWED_TARGETS: AllowedTargets = T::ALLOWED_TARGETS; const TEMPLATE: AttributeTemplate = template!(Word); @@ -271,6 +289,7 @@ pub(crate) trait CombineAttributeParser: 'static { /// For example, individual representations from `#[repr(...)]` attributes into an `AttributeKind::Repr(x)`, /// where `x` is a vec of these individual reprs. 
const CONVERT: ConvertFn; + const SAFETY: AttributeSafety = AttributeSafety::Normal; const ALLOWED_TARGETS: AllowedTargets; @@ -312,6 +331,7 @@ impl, S: Stage> AttributeParser for Combine) -> Option { if let Some(first_span) = self.first_span { diff --git a/compiler/rustc_attr_parsing/src/context.rs b/compiler/rustc_attr_parsing/src/context.rs index 3f722bef5bf35..647c816247bf9 100644 --- a/compiler/rustc_attr_parsing/src/context.rs +++ b/compiler/rustc_attr_parsing/src/context.rs @@ -59,7 +59,7 @@ use crate::attributes::stability::*; use crate::attributes::test_attrs::*; use crate::attributes::traits::*; use crate::attributes::transparency::*; -use crate::attributes::{AttributeParser as _, Combine, Single, WithoutArgs}; +use crate::attributes::{AttributeParser as _, AttributeSafety, Combine, Single, WithoutArgs}; use crate::parser::{ArgParser, MetaItemOrLitParser, RefPathParser}; use crate::session_diagnostics::{ AttributeParseError, AttributeParseErrorReason, AttributeParseErrorSuggestions, @@ -76,6 +76,7 @@ pub(super) struct GroupTypeInnerAccept { pub(super) template: AttributeTemplate, pub(super) accept_fn: AcceptFn, pub(super) allowed_targets: AllowedTargets, + pub(super) safety: AttributeSafety, pub(super) finalizer: FinalizeFn, } @@ -126,6 +127,7 @@ macro_rules! 
attribute_parsers { accept_fn(s, cx, args) }) }), + safety: <$names as crate::attributes::AttributeParser<$stage>>::SAFETY, allowed_targets: <$names as crate::attributes::AttributeParser<$stage>>::ALLOWED_TARGETS, finalizer: Box::new(|cx| { let state = STATE_OBJECT.take(); diff --git a/compiler/rustc_attr_parsing/src/interface.rs b/compiler/rustc_attr_parsing/src/interface.rs index 68016d81c954c..85e714a1a917c 100644 --- a/compiler/rustc_attr_parsing/src/interface.rs +++ b/compiler/rustc_attr_parsing/src/interface.rs @@ -12,6 +12,7 @@ use rustc_session::Session; use rustc_session::lint::LintId; use rustc_span::{DUMMY_SP, Span, Symbol, sym}; +use crate::attributes::AttributeSafety; use crate::context::{AcceptContext, FinalizeContext, FinalizeFn, SharedContext, Stage}; use crate::early_parsed::{EARLY_PARSED_ATTRIBUTES, EarlyParsedState}; use crate::parser::{AllowExprMetavar, ArgParser, PathParser, RefPathParser}; @@ -135,6 +136,7 @@ impl<'sess> AttributeParser<'sess, Early> { parse_fn: fn(cx: &mut AcceptContext<'_, '_, Early>, item: &ArgParser) -> Option, template: &AttributeTemplate, allow_expr_metavar: AllowExprMetavar, + expected_safety: AttributeSafety, ) -> Option { let ast::AttrKind::Normal(normal_attr) = &attr.kind else { panic!("parse_single called on a doc attr") @@ -157,6 +159,7 @@ impl<'sess> AttributeParser<'sess, Early> { attr.style, path, Some(normal_attr.item.unsafety), + expected_safety, ParsedDescription::Attribute, target_span, target_node_id, @@ -178,6 +181,7 @@ impl<'sess> AttributeParser<'sess, Early> { attr_style: AttrStyle, attr_path: AttrPath, attr_safety: Option, + expected_safety: AttributeSafety, parsed_description: ParsedDescription, target_span: Span, target_node_id: NodeId, @@ -199,7 +203,13 @@ impl<'sess> AttributeParser<'sess, Early> { sess.psess.buffer_lint(lint_id.lint, span, target_node_id, kind) }; if let Some(safety) = attr_safety { - parser.check_attribute_safety(&attr_path, inner_span, safety, &mut emit_lint) + 
parser.check_attribute_safety( + &attr_path, + inner_span, + safety, + expected_safety, + &mut emit_lint, + ) } let mut cx: AcceptContext<'_, 'sess, Early> = AcceptContext { shared: SharedContext { @@ -314,17 +324,18 @@ impl<'sess, S: Stage> AttributeParser<'sess, S> { } }; - self.check_attribute_safety( - &attr_path, - lower_span(n.item.span()), - n.item.unsafety, - &mut emit_lint, - ); - let parts = n.item.path.segments.iter().map(|seg| seg.ident.name).collect::>(); if let Some(accept) = S::parsers().accepters.get(parts.as_slice()) { + self.check_attribute_safety( + &attr_path, + lower_span(n.item.span()), + n.item.unsafety, + accept.safety, + &mut emit_lint, + ); + let Some(args) = ArgParser::from_attr_args( args, &parts, @@ -397,6 +408,14 @@ impl<'sess, S: Stage> AttributeParser<'sess, S> { span: attr_span, }; + self.check_attribute_safety( + &attr_path, + lower_span(n.item.span()), + n.item.unsafety, + AttributeSafety::Normal, + &mut emit_lint, + ); + if !matches!(self.stage.should_emit(), ShouldEmit::Nothing) && target == Target::Crate { diff --git a/compiler/rustc_attr_parsing/src/lib.rs b/compiler/rustc_attr_parsing/src/lib.rs index 93eb5a0c3ab73..1b08ed3c49b78 100644 --- a/compiler/rustc_attr_parsing/src/lib.rs +++ b/compiler/rustc_attr_parsing/src/lib.rs @@ -106,6 +106,7 @@ mod session_diagnostics; mod target_checking; pub mod validate_attr; +pub use attributes::AttributeSafety; pub use attributes::cfg::{ CFG_TEMPLATE, EvalConfigResult, eval_config_entry, parse_cfg, parse_cfg_attr, parse_cfg_entry, }; diff --git a/compiler/rustc_attr_parsing/src/safety.rs b/compiler/rustc_attr_parsing/src/safety.rs index 262c9c7723eeb..26212ee5f4ca2 100644 --- a/compiler/rustc_attr_parsing/src/safety.rs +++ b/compiler/rustc_attr_parsing/src/safety.rs @@ -1,12 +1,12 @@ use rustc_ast::Safety; use rustc_errors::MultiSpan; -use rustc_feature::{AttributeSafety, BUILTIN_ATTRIBUTE_MAP}; use rustc_hir::AttrPath; use rustc_hir::lints::AttributeLintKind; use 
rustc_session::lint::LintId; use rustc_session::lint::builtin::UNSAFE_ATTR_OUTSIDE_UNSAFE; use rustc_span::Span; +use crate::attributes::AttributeSafety; use crate::context::Stage; use crate::{AttributeParser, ShouldEmit}; @@ -16,28 +16,23 @@ impl<'sess, S: Stage> AttributeParser<'sess, S> { attr_path: &AttrPath, attr_span: Span, attr_safety: Safety, + expected_safety: AttributeSafety, emit_lint: &mut impl FnMut(LintId, MultiSpan, AttributeLintKind), ) { if matches!(self.stage.should_emit(), ShouldEmit::Nothing) { return; } - let name = (attr_path.segments.len() == 1).then_some(attr_path.segments[0]); - - // FIXME: We should retrieve this information from the attribute parsers instead of from `BUILTIN_ATTRIBUTE_MAP` - let builtin_attr_info = name.and_then(|name| BUILTIN_ATTRIBUTE_MAP.get(&name)); - let builtin_attr_safety = builtin_attr_info.map(|x| x.safety); - - match (builtin_attr_safety, attr_safety) { + match (expected_safety, attr_safety) { // - Unsafe builtin attribute // - User wrote `#[unsafe(..)]`, which is permitted on any edition - (Some(AttributeSafety::Unsafe { .. }), Safety::Unsafe(..)) => { + (AttributeSafety::Unsafe { .. 
}, Safety::Unsafe(..)) => { // OK } // - Unsafe builtin attribute // - User did not write `#[unsafe(..)]` - (Some(AttributeSafety::Unsafe { unsafe_since }), Safety::Default) => { + (AttributeSafety::Unsafe { unsafe_since }, Safety::Default) => { let path_span = attr_path.span; // If the `attr_item`'s span is not from a macro, then just suggest @@ -96,7 +91,7 @@ impl<'sess, S: Stage> AttributeParser<'sess, S> { // - Normal builtin attribute // - Writing `#[unsafe(..)]` is not permitted on normal builtin attributes - (None | Some(AttributeSafety::Normal), Safety::Unsafe(unsafe_span)) => { + (AttributeSafety::Normal, Safety::Unsafe(unsafe_span)) => { self.stage.emit_err( self.sess, crate::session_diagnostics::InvalidAttrUnsafe { @@ -108,14 +103,11 @@ impl<'sess, S: Stage> AttributeParser<'sess, S> { // - Normal builtin attribute // - No explicit `#[unsafe(..)]` written. - (None | Some(AttributeSafety::Normal), Safety::Default) => { + (AttributeSafety::Normal, Safety::Default) => { // OK } - ( - Some(AttributeSafety::Unsafe { .. 
} | AttributeSafety::Normal) | None, - Safety::Safe(..), - ) => { + (_, Safety::Safe(..)) => { self.sess.dcx().span_delayed_bug( attr_span, "`check_attribute_safety` does not expect `Safety::Safe` on attributes", diff --git a/compiler/rustc_builtin_macros/src/cfg.rs b/compiler/rustc_builtin_macros/src/cfg.rs index c4a458089f2d2..2872cff0fdc7a 100644 --- a/compiler/rustc_builtin_macros/src/cfg.rs +++ b/compiler/rustc_builtin_macros/src/cfg.rs @@ -6,7 +6,8 @@ use rustc_ast::tokenstream::TokenStream; use rustc_ast::{AttrStyle, token}; use rustc_attr_parsing::parser::{AllowExprMetavar, MetaItemOrLitParser}; use rustc_attr_parsing::{ - self as attr, AttributeParser, CFG_TEMPLATE, ParsedDescription, ShouldEmit, parse_cfg_entry, + self as attr, AttributeParser, AttributeSafety, CFG_TEMPLATE, ParsedDescription, ShouldEmit, + parse_cfg_entry, }; use rustc_expand::base::{DummyResult, ExpandResult, ExtCtxt, MacEager, MacroExpanderResult}; use rustc_hir::attrs::CfgEntry; @@ -53,6 +54,7 @@ fn parse_cfg(cx: &ExtCtxt<'_>, span: Span, tts: TokenStream) -> Result StripUnconfigured<'a> { parse_cfg, &CFG_TEMPLATE, AllowExprMetavar::Yes, + AttributeSafety::Normal, ) else { // Cfg attribute was not parsable, give up return EvalConfigResult::True; diff --git a/compiler/rustc_expand/src/expand.rs b/compiler/rustc_expand/src/expand.rs index 5901f318ff3a9..804d3c02b413d 100644 --- a/compiler/rustc_expand/src/expand.rs +++ b/compiler/rustc_expand/src/expand.rs @@ -15,8 +15,8 @@ use rustc_ast::{ use rustc_ast_pretty::pprust; use rustc_attr_parsing::parser::AllowExprMetavar; use rustc_attr_parsing::{ - AttributeParser, CFG_TEMPLATE, Early, EvalConfigResult, ShouldEmit, eval_config_entry, - parse_cfg, validate_attr, + AttributeParser, AttributeSafety, CFG_TEMPLATE, Early, EvalConfigResult, ShouldEmit, + eval_config_entry, parse_cfg, validate_attr, }; use rustc_data_structures::flat_map_in_place::FlatMapInPlace; use rustc_data_structures::stack::ensure_sufficient_stack; @@ -2331,6 +2331,7 @@ 
impl<'a, 'b> InvocationCollector<'a, 'b> { parse_cfg, &CFG_TEMPLATE, AllowExprMetavar::Yes, + AttributeSafety::Normal, ) else { // Cfg attribute was not parsable, give up return EvalConfigResult::True; diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs index 1c1bca0cbc3cf..144c9f6d0c4dc 100644 --- a/compiler/rustc_feature/src/builtin_attrs.rs +++ b/compiler/rustc_feature/src/builtin_attrs.rs @@ -5,7 +5,6 @@ use std::sync::LazyLock; use AttributeGate::*; use rustc_data_structures::fx::FxHashMap; use rustc_hir::AttrStyle; -use rustc_span::edition::Edition; use rustc_span::{Symbol, sym}; use crate::Features; @@ -67,23 +66,6 @@ pub fn find_gated_cfg(pred: impl Fn(Symbol) -> bool) -> Option<&'static GatedCfg GATED_CFGS.iter().find(|(cfg_sym, ..)| pred(*cfg_sym)) } -// If you change this, please modify `src/doc/unstable-book` as well. You must -// move that documentation into the relevant place in the other docs, and -// remove the chapter on the flag. - -#[derive(Copy, Clone, PartialEq, Debug)] -pub enum AttributeSafety { - /// Normal attribute that does not need `#[unsafe(...)]` - Normal, - - /// Unsafe attribute that requires safety obligations to be discharged. - /// - /// An error is emitted when `#[unsafe(...)]` is omitted, except when the attribute's edition - /// is less than the one stored in `unsafe_since`. This handles attributes that were safe in - /// earlier editions, but become unsafe in later ones. - Unsafe { unsafe_since: Option }, -} - #[derive(Clone, Debug, Copy)] pub enum AttributeGate { /// A gated attribute which requires a feature gate to be enabled. @@ -205,54 +187,15 @@ macro_rules! template { } macro_rules! ungated { - (unsafe($edition:ident) $attr:ident $(,)?) => { - BuiltinAttribute { - name: sym::$attr, - safety: AttributeSafety::Unsafe { unsafe_since: Some(Edition::$edition) }, - gate: Ungated, - } - }; - (unsafe $attr:ident $(,)?) 
=> { - BuiltinAttribute { - name: sym::$attr, - safety: AttributeSafety::Unsafe { unsafe_since: None }, - gate: Ungated, - } - }; ($attr:ident $(,)?) => { - BuiltinAttribute { name: sym::$attr, safety: AttributeSafety::Normal, gate: Ungated } + BuiltinAttribute { name: sym::$attr, gate: Ungated } }; } macro_rules! gated { - (unsafe $attr:ident, $gate:ident, $message:expr $(,)?) => { - BuiltinAttribute { - name: sym::$attr, - safety: AttributeSafety::Unsafe { unsafe_since: None }, - gate: Gated { - feature: sym::$gate, - message: $message, - check: Features::$gate, - notes: &[], - }, - } - }; - (unsafe $attr:ident, $message:expr $(,)?) => { - BuiltinAttribute { - name: sym::$attr, - safety: AttributeSafety::Unsafe { unsafe_since: None }, - gate: Gated { - feature: sym::$attr, - message: $message, - check: Features::$attr, - notes: &[], - }, - } - }; ($attr:ident, $gate:ident, $message:expr $(,)?) => { BuiltinAttribute { name: sym::$attr, - safety: AttributeSafety::Normal, gate: Gated { feature: sym::$gate, message: $message, @@ -264,7 +207,6 @@ macro_rules! gated { ($attr:ident, $message:expr $(,)?) => { BuiltinAttribute { name: sym::$attr, - safety: AttributeSafety::Normal, gate: Gated { feature: sym::$attr, message: $message, @@ -289,7 +231,6 @@ macro_rules! rustc_attr { ($attr:ident $(, $notes:expr)* $(,)?) => { BuiltinAttribute { name: sym::$attr, - safety: AttributeSafety::Normal, gate: Gated { feature: sym::rustc_attrs, message: "use of an internal attribute", @@ -299,7 +240,7 @@ macro_rules! rustc_attr { stringify!($attr), "]` attribute is an internal implementation detail that will never be stable"), $($notes),* - ] + ] }, } }; @@ -313,7 +254,6 @@ macro_rules! 
experimental { pub struct BuiltinAttribute { pub name: Symbol, - pub safety: AttributeSafety, pub gate: AttributeGate, } @@ -348,10 +288,7 @@ pub static BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ ungated!(forbid), ungated!(deny), ungated!(must_use), - gated!( - must_not_suspend, - experimental!(must_not_suspend) - ), + gated!(must_not_suspend, experimental!(must_not_suspend)), ungated!(deprecated), // Crate properties: @@ -366,222 +303,103 @@ pub static BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ // FIXME(#82232, #143834): temporarily renamed to mitigate `#[align]` nameres ambiguity gated!(rustc_align, fn_align, experimental!(rustc_align)), gated!(rustc_align_static, static_align, experimental!(rustc_align_static)), - ungated!( - unsafe(Edition2024) export_name, - ), - ungated!( - unsafe(Edition2024) link_section, - ), - ungated!( - unsafe(Edition2024) no_mangle, - ), - ungated!( - used, - ), - ungated!( - link_ordinal, - ), - ungated!( - unsafe naked, - ), + ungated!(export_name), + ungated!(link_section), + ungated!(no_mangle), + ungated!(used), + ungated!(link_ordinal), + ungated!(naked), // See `TyAndLayout::pass_indirectly_in_non_rustic_abis` for details. 
- rustc_attr!( - rustc_pass_indirectly_in_non_rustic_abis, - "types marked with `#[rustc_pass_indirectly_in_non_rustic_abis]` are always passed indirectly by non-Rustic ABIs" - ), + rustc_attr!(rustc_pass_indirectly_in_non_rustic_abis, "types marked with `#[rustc_pass_indirectly_in_non_rustic_abis]` are always passed indirectly by non-Rustic ABIs"), // Limits: - ungated!( - recursion_limit, - ), - ungated!( - type_length_limit, - ), - gated!( - move_size_limit, - large_assignments, experimental!(move_size_limit) - ), + ungated!(recursion_limit), + ungated!(type_length_limit), + gated!(move_size_limit, large_assignments, experimental!(move_size_limit)), // Entry point: - ungated!( - no_main, - ), + ungated!(no_main), // Modules, prelude, and resolution: - ungated!( - path, - ), - ungated!( - no_std, - ), - ungated!( - no_implicit_prelude, - ), - ungated!( - non_exhaustive, - ), + ungated!(path), + ungated!(no_std), + ungated!(no_implicit_prelude), + ungated!(non_exhaustive), // Runtime - ungated!( - windows_subsystem, - ), - ungated!( // RFC 2070 - panic_handler, - ), + ungated!(windows_subsystem), + ungated!(panic_handler), // RFC 2070 // Code generation: - ungated!( - inline, - ), - ungated!( - cold, - ), - ungated!( - no_builtins, - ), - ungated!( - target_feature, - ), - ungated!( - track_caller, - ), - ungated!( - instruction_set, - ), - gated!( - unsafe force_target_feature, - effective_target_features, experimental!(force_target_feature) - ), - gated!( - sanitize, - sanitize, experimental!(sanitize), - ), - gated!( - coverage, - coverage_attribute, experimental!(coverage) - ), - - ungated!( - doc, - ), + ungated!(inline), + ungated!(cold), + ungated!(no_builtins), + ungated!(target_feature), + ungated!(track_caller), + ungated!(instruction_set), + gated!(force_target_feature, effective_target_features, experimental!(force_target_feature)), + gated!(sanitize, sanitize, experimental!(sanitize)), + gated!(coverage, coverage_attribute, experimental!(coverage)), + 
+ ungated!(doc), // Debugging - ungated!( - debugger_visualizer, - ), - ungated!( - collapse_debuginfo, - ), + ungated!(debugger_visualizer), + ungated!(collapse_debuginfo), // ========================================================================== // Unstable attributes: // ========================================================================== // Linking: - gated!( - export_stable, - experimental!(export_stable) - ), + gated!(export_stable, experimental!(export_stable)), // Testing: - gated!( - test_runner, - custom_test_frameworks, - "custom test frameworks are an unstable feature", - ), + gated!(test_runner, custom_test_frameworks, "custom test frameworks are an unstable feature"), - gated!( - reexport_test_harness_main, - custom_test_frameworks, - "custom test frameworks are an unstable feature", - ), + gated!(reexport_test_harness_main, custom_test_frameworks, "custom test frameworks are an unstable feature"), // RFC #1268 - gated!( - marker, - marker_trait_attr, experimental!(marker) - ), - gated!( - thread_local, - "`#[thread_local]` is an experimental feature, and does not currently handle destructors", - ), - gated!( - no_core, - experimental!(no_core) - ), + gated!(marker, marker_trait_attr, experimental!(marker)), + gated!(thread_local, "`#[thread_local]` is an experimental feature, and does not currently handle destructors"), + gated!(no_core, experimental!(no_core)), // RFC 2412 - gated!( - optimize, - optimize_attribute, experimental!(optimize) - ), + gated!(optimize, optimize_attribute, experimental!(optimize)), - gated!( - unsafe ffi_pure, - experimental!(ffi_pure) - ), - gated!( - unsafe ffi_const, - experimental!(ffi_const) - ), - gated!( - register_tool, - experimental!(register_tool), - ), + gated!(ffi_pure, experimental!(ffi_pure)), + gated!(ffi_const, experimental!(ffi_const)), + gated!(register_tool, experimental!(register_tool)), // `#[cfi_encoding = ""]` - gated!( - cfi_encoding, - experimental!(cfi_encoding) - ), + 
gated!(cfi_encoding, experimental!(cfi_encoding)), // `#[coroutine]` attribute to be applied to closures to make them coroutines instead - gated!( - coroutine, - coroutines, experimental!(coroutine) - ), + gated!(coroutine, coroutines, experimental!(coroutine)), // RFC 3543 // `#[patchable_function_entry(prefix_nops = m, entry_nops = n)]` - gated!( - patchable_function_entry, - experimental!(patchable_function_entry) - ), + gated!(patchable_function_entry, experimental!(patchable_function_entry)), // The `#[loop_match]` and `#[const_continue]` attributes are part of the // lang experiment for RFC 3720 tracked in: // // - https://github.com/rust-lang/rust/issues/132306 - gated!( - const_continue, - loop_match, experimental!(const_continue) - ), - gated!( - loop_match, - loop_match, experimental!(loop_match) - ), + gated!(const_continue, loop_match, experimental!(const_continue)), + gated!(loop_match, loop_match, experimental!(loop_match)), // The `#[pin_v2]` attribute is part of the `pin_ergonomics` experiment // that allows structurally pinning, tracked in: // // - https://github.com/rust-lang/rust/issues/130494 - gated!( - pin_v2, - pin_ergonomics, experimental!(pin_v2), - ), + gated!(pin_v2, pin_ergonomics, experimental!(pin_v2)), // ========================================================================== // Internal attributes: Stability, deprecation, and unsafe: // ========================================================================== - ungated!( - feature, - ), + ungated!(feature), // DuplicatesOk since it has its own validation - ungated!( - stable, - ), - ungated!( - unstable, - ), + ungated!(stable), + ungated!(unstable), ungated!(unstable_feature_bound), ungated!(unstable_removed), ungated!(rustc_const_unstable), @@ -636,24 +454,12 @@ pub static BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ // Internal attributes: Runtime related: // ========================================================================== - rustc_attr!( - rustc_allocator, - ), - 
rustc_attr!( - rustc_nounwind, - ), - rustc_attr!( - rustc_reallocator, - ), - rustc_attr!( - rustc_deallocator, - ), - rustc_attr!( - rustc_allocator_zeroed, - ), - rustc_attr!( - rustc_allocator_zeroed_variant, - ), + rustc_attr!(rustc_allocator), + rustc_attr!(rustc_nounwind), + rustc_attr!(rustc_reallocator), + rustc_attr!(rustc_deallocator), + rustc_attr!(rustc_allocator_zeroed), + rustc_attr!(rustc_allocator_zeroed_variant), gated!( default_lib_allocator, allocator_internals, experimental!(default_lib_allocator), @@ -720,49 +526,31 @@ pub static BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ rustc_on_unimplemented, "see `#[diagnostic::on_unimplemented]` for the stable equivalent of this attribute" ), - rustc_attr!( - rustc_confusables, - ), + rustc_attr!(rustc_confusables), // Enumerates "identity-like" conversion methods to suggest on type mismatch. - rustc_attr!( - rustc_conversion_suggestion, - ), + rustc_attr!(rustc_conversion_suggestion), // Prevents field reads in the marked trait or method to be considered // during dead code analysis. - rustc_attr!( - rustc_trivial_field_reads, - ), + rustc_attr!(rustc_trivial_field_reads), // Used by the `rustc::potential_query_instability` lint to warn methods which // might not be stable during incremental compilation. - rustc_attr!( - rustc_lint_query_instability, - ), + rustc_attr!(rustc_lint_query_instability), // Used by the `rustc::untracked_query_information` lint to warn methods which // might not be stable during incremental compilation. - rustc_attr!( - rustc_lint_untracked_query_information, - ), + rustc_attr!(rustc_lint_untracked_query_information), // Used by the `rustc::bad_opt_access` lint to identify `DebuggingOptions` and `CodegenOptions` // types (as well as any others in future). - rustc_attr!( - rustc_lint_opt_ty, - ), + rustc_attr!(rustc_lint_opt_ty), // Used by the `rustc::bad_opt_access` lint on fields // types (as well as any others in future). 
- rustc_attr!( - rustc_lint_opt_deny_field_access, - ), + rustc_attr!(rustc_lint_opt_deny_field_access), // ========================================================================== // Internal attributes, Const related: // ========================================================================== - rustc_attr!( - rustc_promotable, - ), - rustc_attr!( - rustc_legacy_const_generics, - ), + rustc_attr!(rustc_promotable), + rustc_attr!(rustc_legacy_const_generics), // Do not const-check this function's body. It will always get replaced during CTFE via `hook_special_const_fn`. rustc_attr!( rustc_do_not_const_check, @@ -873,7 +661,6 @@ pub static BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ BuiltinAttribute { name: sym::rustc_diagnostic_item, - safety: AttributeSafety::Normal, gate: Gated { feature: sym::rustc_attrs, message: "use of an internal attribute", @@ -961,99 +748,39 @@ pub static BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ // ========================================================================== rustc_attr!(TEST, rustc_effective_visibility), - rustc_attr!( - TEST, rustc_dump_inferred_outlives, - ), - rustc_attr!( - TEST, rustc_capture_analysis, - ), - rustc_attr!( - TEST, rustc_insignificant_dtor, - ), - rustc_attr!( - TEST, rustc_no_implicit_bounds, - ), - rustc_attr!( - TEST, rustc_strict_coherence, - ), - rustc_attr!( - TEST, rustc_dump_variances, - ), - rustc_attr!( - TEST, rustc_dump_variances_of_opaques, - ), - rustc_attr!( - TEST, rustc_dump_hidden_type_of_opaques, - ), - rustc_attr!( - TEST, rustc_dump_layout, - ), - rustc_attr!( - TEST, rustc_abi, - ), - rustc_attr!( - TEST, rustc_regions, - ), - rustc_attr!( - TEST, rustc_delayed_bug_from_inside_query, - ), - rustc_attr!( - TEST, rustc_dump_user_args, - ), - rustc_attr!( - TEST, rustc_evaluate_where_clauses, - ), - rustc_attr!( - TEST, rustc_if_this_changed, - ), - rustc_attr!( - TEST, rustc_then_this_would_need, - ), - rustc_attr!( - TEST, rustc_clean, - ), - rustc_attr!( - TEST, 
rustc_partition_reused, - ), - rustc_attr!( - TEST, rustc_partition_codegened, - ), - rustc_attr!( - TEST, rustc_expected_cgu_reuse, - ), - rustc_attr!( - TEST, rustc_dump_symbol_name, - ), - rustc_attr!( - TEST, rustc_dump_def_path, - ), - rustc_attr!( - TEST, rustc_mir, - ), + rustc_attr!(TEST, rustc_dump_inferred_outlives), + rustc_attr!(TEST, rustc_capture_analysis,), + rustc_attr!(TEST, rustc_insignificant_dtor), + rustc_attr!(TEST, rustc_no_implicit_bounds), + rustc_attr!(TEST, rustc_strict_coherence), + rustc_attr!(TEST, rustc_dump_variances), + rustc_attr!(TEST, rustc_dump_variances_of_opaques), + rustc_attr!(TEST, rustc_dump_hidden_type_of_opaques), + rustc_attr!(TEST, rustc_dump_layout), + rustc_attr!(TEST, rustc_abi), + rustc_attr!(TEST, rustc_regions), + rustc_attr!(TEST, rustc_delayed_bug_from_inside_query), + rustc_attr!(TEST, rustc_dump_user_args), + rustc_attr!(TEST, rustc_evaluate_where_clauses), + rustc_attr!(TEST, rustc_if_this_changed), + rustc_attr!(TEST, rustc_then_this_would_need), + rustc_attr!(TEST, rustc_clean), + rustc_attr!(TEST, rustc_partition_reused), + rustc_attr!(TEST, rustc_partition_codegened), + rustc_attr!(TEST, rustc_expected_cgu_reuse), + rustc_attr!(TEST, rustc_dump_symbol_name), + rustc_attr!(TEST, rustc_dump_def_path), + rustc_attr!(TEST, rustc_mir), gated!( custom_mir, "the `#[custom_mir]` attribute is just used for the Rust test suite", ), - rustc_attr!( - TEST, rustc_dump_item_bounds, - ), - rustc_attr!( - TEST, rustc_dump_predicates, - ), - rustc_attr!( - TEST, rustc_dump_def_parents, - ), - rustc_attr!( - TEST, rustc_dump_object_lifetime_defaults, - ), - rustc_attr!( - TEST, rustc_dump_vtable, - ), - rustc_attr!( - TEST, rustc_dummy, - ), - rustc_attr!( - TEST, pattern_complexity_limit, - ), + rustc_attr!(TEST, rustc_dump_item_bounds), + rustc_attr!(TEST, rustc_dump_predicates), + rustc_attr!(TEST, rustc_dump_def_parents), + rustc_attr!(TEST, rustc_dump_object_lifetime_defaults), + rustc_attr!(TEST, rustc_dump_vtable), 
+ rustc_attr!(TEST, rustc_dummy), + rustc_attr!(TEST, pattern_complexity_limit), ]; pub fn is_builtin_attr_name(name: Symbol) -> bool { diff --git a/compiler/rustc_feature/src/lib.rs b/compiler/rustc_feature/src/lib.rs index 34ac6b3f9a7c8..ce3ce6fcccee4 100644 --- a/compiler/rustc_feature/src/lib.rs +++ b/compiler/rustc_feature/src/lib.rs @@ -129,7 +129,7 @@ pub fn find_feature_issue(feature: Symbol, issue: GateIssue) -> Option Date: Sat, 24 Jan 2026 13:37:53 +0100 Subject: [PATCH 54/64] ImproperCTypes: Move erasing_region_normalisation into helper function Another interal change that shouldn't impact rustc users. To prepare for the upcoming split of visit_type, we reorganise the instances of `cx.tcx.try_normalize_erasing_regions(cx.typing_env(), ty).unwrap_or(ty)` into a helper function outside of the main structs. --- .../rustc_lint/src/types/improper_ctypes.rs | 57 +++++++------------ 1 file changed, 21 insertions(+), 36 deletions(-) diff --git a/compiler/rustc_lint/src/types/improper_ctypes.rs b/compiler/rustc_lint/src/types/improper_ctypes.rs index 9f10cba64cd43..865112219cc2d 100644 --- a/compiler/rustc_lint/src/types/improper_ctypes.rs +++ b/compiler/rustc_lint/src/types/improper_ctypes.rs @@ -138,6 +138,17 @@ declare_lint_pass!(ImproperCTypesLint => [ USES_POWER_ALIGNMENT ]); +/// Getting the (normalized) type out of a field (for, e.g., an enum variant or a tuple). +#[inline] +fn get_type_from_field<'tcx>( + cx: &LateContext<'tcx>, + field: &ty::FieldDef, + args: GenericArgsRef<'tcx>, +) -> Ty<'tcx> { + let field_ty = field.ty(cx.tcx, args); + cx.tcx.try_normalize_erasing_regions(cx.typing_env(), field_ty).unwrap_or(field_ty) +} + /// Check a variant of a non-exhaustive enum for improper ctypes /// /// We treat `#[non_exhaustive] enum` as "ensure that code will compile if new variants are added". 
@@ -365,22 +376,6 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { Self { cx, base_ty, base_fn_mode, cache: FxHashSet::default() } } - /// Checks if the given field's type is "ffi-safe". - fn check_field_type_for_ffi( - &mut self, - state: VisitorState, - field: &ty::FieldDef, - args: GenericArgsRef<'tcx>, - ) -> FfiResult<'tcx> { - let field_ty = field.ty(self.cx.tcx, args); - let field_ty = self - .cx - .tcx - .try_normalize_erasing_regions(self.cx.typing_env(), field_ty) - .unwrap_or(field_ty); - self.visit_type(state, field_ty) - } - /// Checks if the given `VariantDef`'s field types are "ffi-safe". fn check_variant_for_ffi( &mut self, @@ -394,7 +389,8 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { let transparent_with_all_zst_fields = if def.repr().transparent() { if let Some(field) = super::transparent_newtype_field(self.cx.tcx, variant) { // Transparent newtypes have at most one non-ZST field which needs to be checked.. - match self.check_field_type_for_ffi(state, field, args) { + let field_ty = get_type_from_field(self.cx, field, args); + match self.visit_type(state, field_ty) { FfiUnsafe { ty, .. } if ty.is_unit() => (), r => return r, } @@ -412,7 +408,8 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { // We can't completely trust `repr(C)` markings, so make sure the fields are actually safe. let mut all_phantom = !variant.fields.is_empty(); for field in &variant.fields { - all_phantom &= match self.check_field_type_for_ffi(state, field, args) { + let field_ty = get_type_from_field(self.cx, field, args); + all_phantom &= match self.visit_type(state, field_ty) { FfiSafe => false, // `()` fields are FFI-safe! FfiUnsafe { ty, .. 
} if ty.is_unit() => false, @@ -721,22 +718,11 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { } } - if let Some(ty) = self - .cx - .tcx - .try_normalize_erasing_regions(self.cx.typing_env(), ty) - .unwrap_or(ty) - .visit_with(&mut ProhibitOpaqueTypes) - .break_value() - { - Some(FfiResult::FfiUnsafe { - ty, - reason: msg!("opaque types have no C equivalent"), - help: None, - }) - } else { - None - } + ty.visit_with(&mut ProhibitOpaqueTypes).break_value().map(|ty| FfiResult::FfiUnsafe { + ty, + reason: msg!("opaque types have no C equivalent"), + help: None, + }) } /// Check if the type is array and emit an unsafe type lint. @@ -754,12 +740,11 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { /// Determine the FFI-safety of a single (MIR) type, given the context of how it is used. fn check_type(&mut self, state: VisitorState, ty: Ty<'tcx>) -> FfiResult<'tcx> { + let ty = self.cx.tcx.try_normalize_erasing_regions(self.cx.typing_env(), ty).unwrap_or(ty); if let Some(res) = self.visit_for_opaque_ty(ty) { return res; } - let ty = self.cx.tcx.try_normalize_erasing_regions(self.cx.typing_env(), ty).unwrap_or(ty); - // C doesn't really support passing arrays by value - the only way to pass an array by value // is through a struct. So, first test that the top level isn't an array, and then // recursively check the types inside. 
From a9d7027f3986fba29cc329c2cf5038d9c21b7583 Mon Sep 17 00:00:00 2001 From: Jules Bertholet Date: Tue, 14 Apr 2026 18:52:50 -0400 Subject: [PATCH 55/64] rustdoc: percent-encode URL fragments --- src/librustdoc/html/markdown.rs | 6 ++++-- tests/rustdoc-html/unicode.rs | 10 ++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) create mode 100644 tests/rustdoc-html/unicode.rs diff --git a/src/librustdoc/html/markdown.rs b/src/librustdoc/html/markdown.rs index 858545bd09847..2034abdfd1566 100644 --- a/src/librustdoc/html/markdown.rs +++ b/src/librustdoc/html/markdown.rs @@ -582,6 +582,7 @@ impl<'a, I: Iterator>> Iterator for HeadingLinks<'a, '_, } } let id = self.id_map.derive(id); + let percent_encoded_id = small_url_encode(id.clone()); if let Some(ref mut builder) = self.toc { let mut text_header = String::new(); @@ -596,8 +597,9 @@ impl<'a, I: Iterator>> Iterator for HeadingLinks<'a, '_, std::cmp::min(level as u32 + (self.heading_offset as u32), MAX_HEADER_LEVEL); self.buf.push_back((Event::Html(format!("").into()), 0..0)); - let start_tags = - format!("§"); + let start_tags = format!( + "§" + ); return Some((Event::Html(start_tags.into()), 0..0)); } event diff --git a/tests/rustdoc-html/unicode.rs b/tests/rustdoc-html/unicode.rs new file mode 100644 index 0000000000000..a961f178ec3b1 --- /dev/null +++ b/tests/rustdoc-html/unicode.rs @@ -0,0 +1,10 @@ +#![crate_name = "unicode"] + +pub struct Foo; + +impl Foo { + //@ has unicode/struct.Foo.html //a/@href "#%C3%BA" + //@ !has unicode/struct.Foo.html //a/@href "#ú" + /// # ú + pub fn foo() {} +} From 98242676d9ef57ea6e6224a45229ea65025b7a84 Mon Sep 17 00:00:00 2001 From: Martin Nordholts Date: Thu, 16 Apr 2026 05:57:55 +0200 Subject: [PATCH 56/64] tests/debuginfo/basic-stepping.rs: Remove FIXME related to ZSTs We don't consider it a bug that users can't break on initialization of some non-zero sized types (see comment on `maximally-steppable` at the top of the file), so it does not make sense to consider it a 
bug that users can't break on initialization of some zero-sized types. --- tests/debuginfo/basic-stepping.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/debuginfo/basic-stepping.rs b/tests/debuginfo/basic-stepping.rs index a4410c70ba38a..f81c5cf7d3565 100644 --- a/tests/debuginfo/basic-stepping.rs +++ b/tests/debuginfo/basic-stepping.rs @@ -142,8 +142,8 @@ fn main () { let a = (); // #break let b : [i32; 0] = []; - // FIXME(#97083): Should we be able to break on initialization of zero-sized types? - // FIXME(#97083): Right now the first breakable line is: + // The above lines initialize zero-sized types. That does not emit machine + // code, so the first breakable line is: let mut c = 27; let d = c = 99; let e = "hi bob"; From 52ad8c071cda74ae9465fb54bb350396db294735 Mon Sep 17 00:00:00 2001 From: Shivendra Sharma Date: Wed, 8 Apr 2026 03:30:00 +0530 Subject: [PATCH 57/64] rustdoc: preserve `doc(cfg)` on locally re-exported type aliases When a type alias is locally re-exported from a private module (an implicit inline), rustdoc drops its `cfg` attributes because it treats it like a standard un-inlined re-export. Since type aliases have no inner fields to carry the `cfg` badge (unlike structs or enums), the portability info is lost entirely. This patch explicitly preserves the target's `cfg` metadata when the generated item is a `TypeAliasItem`, ensuring the portability badge renders correctly without breaking standard cross-crate re-export behavior. 
--- src/librustdoc/clean/mod.rs | 3 +- .../reexport/type-alias-reexport.rs | 34 +++++++++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 tests/rustdoc-html/reexport/type-alias-reexport.rs diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs index 5366a0eca3293..d628889450a0c 100644 --- a/src/librustdoc/clean/mod.rs +++ b/src/librustdoc/clean/mod.rs @@ -205,7 +205,8 @@ fn generate_item_with_correct_attrs( attrs.extend(get_all_import_attributes(cx, import_id, def_id, is_inline)); is_inline = is_inline || import_is_inline; } - add_without_unwanted_attributes(&mut attrs, target_attrs, is_inline, None); + let keep_target_cfg = is_inline || matches!(kind, ItemKind::TypeAliasItem(..)); + add_without_unwanted_attributes(&mut attrs, target_attrs, keep_target_cfg, None); attrs } else { // We only keep the item's attributes. diff --git a/tests/rustdoc-html/reexport/type-alias-reexport.rs b/tests/rustdoc-html/reexport/type-alias-reexport.rs new file mode 100644 index 0000000000000..1bcdff88e22c5 --- /dev/null +++ b/tests/rustdoc-html/reexport/type-alias-reexport.rs @@ -0,0 +1,34 @@ +// Regression test for . +// This test ensures that auto-generated and explicit `doc(cfg)` attributes are correctly +// preserved for locally re-exported type aliases. + +//@ compile-flags: --cfg feature="foo" + +#![crate_name = "foo"] +#![feature(doc_cfg)] + +mod inner { + #[cfg(feature = "foo")] + pub type One = u32; + + #[doc(cfg(feature = "foo"))] + pub type Two = u32; +} + +//@ has 'foo/index.html' +// There should be two items in the type aliases table. +//@ count - '//*[@class="item-table"]/dt' 2 +// Both of them should have the portability badge in the module index. +//@ count - '//*[@class="item-table"]/dt/*[@class="stab portability"]' 2 + +//@ has 'foo/type.One.html' +// Check that the individual type page has the portability badge. 
+//@ count - '//*[@id="main-content"]/*[@class="item-info"]/*[@class="stab portability"]' 1 +//@ has - '//*[@id="main-content"]/*[@class="item-info"]/*[@class="stab portability"]' 'foo' + +//@ has 'foo/type.Two.html' +// Check the explicit doc(cfg) type page as well. +//@ count - '//*[@id="main-content"]/*[@class="item-info"]/*[@class="stab portability"]' 1 +//@ has - '//*[@id="main-content"]/*[@class="item-info"]/*[@class="stab portability"]' 'foo' + +pub use self::inner::{One, Two}; From 0529b94578aba5147649516dcf0186eb193f2e40 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Thu, 16 Apr 2026 18:16:27 +1000 Subject: [PATCH 58/64] Move `Token` impl block. For no apparent reason it's in a different file to `Token` itself. This commit moves it. --- compiler/rustc_ast_pretty/src/pp.rs | 6 ++++++ compiler/rustc_ast_pretty/src/pp/convenience.rs | 6 ------ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/compiler/rustc_ast_pretty/src/pp.rs b/compiler/rustc_ast_pretty/src/pp.rs index 4108671a3629e..9d0888a15d8f2 100644 --- a/compiler/rustc_ast_pretty/src/pp.rs +++ b/compiler/rustc_ast_pretty/src/pp.rs @@ -188,6 +188,12 @@ pub(crate) enum Token { End, } +impl Token { + pub(crate) fn is_hardbreak_tok(&self) -> bool { + *self == Printer::hardbreak_tok_offset(0) + } +} + #[derive(Copy, Clone)] enum PrintFrame { Fits, diff --git a/compiler/rustc_ast_pretty/src/pp/convenience.rs b/compiler/rustc_ast_pretty/src/pp/convenience.rs index 9b902b38122c8..c9589535940a0 100644 --- a/compiler/rustc_ast_pretty/src/pp/convenience.rs +++ b/compiler/rustc_ast_pretty/src/pp/convenience.rs @@ -89,9 +89,3 @@ impl Printer { }); } } - -impl Token { - pub(crate) fn is_hardbreak_tok(&self) -> bool { - *self == Printer::hardbreak_tok_offset(0) - } -} From 9e940e40519573b150c85c76c593d7cd777411e1 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Thu, 16 Apr 2026 18:16:11 +1000 Subject: [PATCH 59/64] Merge `Printer` impl blocks. 
`rustc_ast_pretty::pp` defines `Printer` and has a 346 line `impl Printer` block for it. `rustc_ast_pretty::pp::convenience` has another `impl Printer` block with 85 lines. `rustc_ast_pretty::helpers` has another `impl Printer` block with 45 lines. This commit merges the two small `impl Printer` blocks into the bigger one, because there is no good reason for them to be separate. Doing this eliminates the `rustc_ast_pretty::pp::convenience` and `rustc_ast_pretty::helpers` modules; no great loss given that they were small and had extremely generic names. --- compiler/rustc_ast_pretty/src/helpers.rs | 49 ------- compiler/rustc_ast_pretty/src/lib.rs | 1 - compiler/rustc_ast_pretty/src/pp.rs | 129 +++++++++++++++++- .../rustc_ast_pretty/src/pp/convenience.rs | 91 ------------ 4 files changed, 128 insertions(+), 142 deletions(-) delete mode 100644 compiler/rustc_ast_pretty/src/helpers.rs delete mode 100644 compiler/rustc_ast_pretty/src/pp/convenience.rs diff --git a/compiler/rustc_ast_pretty/src/helpers.rs b/compiler/rustc_ast_pretty/src/helpers.rs deleted file mode 100644 index 34641ea2f5ae0..0000000000000 --- a/compiler/rustc_ast_pretty/src/helpers.rs +++ /dev/null @@ -1,49 +0,0 @@ -use std::borrow::Cow; - -use crate::pp::Printer; - -impl Printer { - pub fn word_space>>(&mut self, w: W) { - self.word(w); - self.space(); - } - - pub fn popen(&mut self) { - self.word("("); - } - - pub fn pclose(&mut self) { - self.word(")"); - } - - pub fn hardbreak_if_not_bol(&mut self) { - if !self.is_beginning_of_line() { - self.hardbreak() - } - } - - pub fn space_if_not_bol(&mut self) { - if !self.is_beginning_of_line() { - self.space(); - } - } - - pub fn nbsp(&mut self) { - self.word(" ") - } - - pub fn word_nbsp>>(&mut self, w: S) { - self.word(w); - self.nbsp() - } - - /// Synthesizes a comment that was not textually present in the original - /// source file. 
- pub fn synth_comment(&mut self, text: impl Into>) { - self.word("/*"); - self.space(); - self.word(text); - self.space(); - self.word("*/") - } -} diff --git a/compiler/rustc_ast_pretty/src/lib.rs b/compiler/rustc_ast_pretty/src/lib.rs index a7d9f89fb3df5..bfc1d387b7009 100644 --- a/compiler/rustc_ast_pretty/src/lib.rs +++ b/compiler/rustc_ast_pretty/src/lib.rs @@ -3,6 +3,5 @@ #![feature(negative_impls)] // tidy-alphabetical-end -mod helpers; pub mod pp; pub mod pprust; diff --git a/compiler/rustc_ast_pretty/src/pp.rs b/compiler/rustc_ast_pretty/src/pp.rs index 9d0888a15d8f2..c7a38d981b893 100644 --- a/compiler/rustc_ast_pretty/src/pp.rs +++ b/compiler/rustc_ast_pretty/src/pp.rs @@ -132,7 +132,6 @@ //! methods called `Printer::scan_*`, and the 'PRINT' process is the //! method called `Printer::print`. -mod convenience; mod ring; use std::borrow::Cow; @@ -485,4 +484,132 @@ impl Printer { self.out.push_str(string); self.space -= string.len() as isize; } + + /// Synthesizes a comment that was not textually present in the original + /// source file. 
+ pub fn synth_comment(&mut self, text: impl Into>) { + self.word("/*"); + self.space(); + self.word(text); + self.space(); + self.word("*/") + } + + /// "raw box" + pub fn rbox(&mut self, indent: isize, breaks: Breaks) -> BoxMarker { + self.scan_begin(BeginToken { indent: IndentStyle::Block { offset: indent }, breaks }) + } + + /// Inconsistent breaking box + pub fn ibox(&mut self, indent: isize) -> BoxMarker { + self.rbox(indent, Breaks::Inconsistent) + } + + /// Consistent breaking box + pub fn cbox(&mut self, indent: isize) -> BoxMarker { + self.rbox(indent, Breaks::Consistent) + } + + pub fn visual_align(&mut self) -> BoxMarker { + self.scan_begin(BeginToken { indent: IndentStyle::Visual, breaks: Breaks::Consistent }) + } + + pub fn break_offset(&mut self, n: usize, off: isize) { + self.scan_break(BreakToken { + offset: off, + blank_space: n as isize, + ..BreakToken::default() + }); + } + + pub fn end(&mut self, b: BoxMarker) { + self.scan_end(b) + } + + pub fn eof(mut self) -> String { + self.scan_eof(); + self.out + } + + pub fn word>>(&mut self, wrd: S) { + let string = wrd.into(); + self.scan_string(string) + } + + pub fn word_space>>(&mut self, w: W) { + self.word(w); + self.space(); + } + + pub fn nbsp(&mut self) { + self.word(" ") + } + + pub fn word_nbsp>>(&mut self, w: S) { + self.word(w); + self.nbsp() + } + + fn spaces(&mut self, n: usize) { + self.break_offset(n, 0) + } + + pub fn zerobreak(&mut self) { + self.spaces(0) + } + + pub fn space(&mut self) { + self.spaces(1) + } + + pub fn popen(&mut self) { + self.word("("); + } + + pub fn pclose(&mut self) { + self.word(")"); + } + + pub fn hardbreak(&mut self) { + self.spaces(SIZE_INFINITY as usize) + } + + pub fn is_beginning_of_line(&self) -> bool { + match self.last_token() { + Some(last_token) => last_token.is_hardbreak_tok(), + None => true, + } + } + + pub fn hardbreak_if_not_bol(&mut self) { + if !self.is_beginning_of_line() { + self.hardbreak() + } + } + + pub fn space_if_not_bol(&mut self) { 
+ if !self.is_beginning_of_line() { + self.space(); + } + } + + pub(crate) fn hardbreak_tok_offset(off: isize) -> Token { + Token::Break(BreakToken { + offset: off, + blank_space: SIZE_INFINITY, + ..BreakToken::default() + }) + } + + pub fn trailing_comma(&mut self) { + self.scan_break(BreakToken { pre_break: Some(','), ..BreakToken::default() }); + } + + pub fn trailing_comma_or_space(&mut self) { + self.scan_break(BreakToken { + blank_space: 1, + pre_break: Some(','), + ..BreakToken::default() + }); + } } diff --git a/compiler/rustc_ast_pretty/src/pp/convenience.rs b/compiler/rustc_ast_pretty/src/pp/convenience.rs deleted file mode 100644 index c9589535940a0..0000000000000 --- a/compiler/rustc_ast_pretty/src/pp/convenience.rs +++ /dev/null @@ -1,91 +0,0 @@ -use std::borrow::Cow; - -use crate::pp::{ - BeginToken, BoxMarker, BreakToken, Breaks, IndentStyle, Printer, SIZE_INFINITY, Token, -}; - -impl Printer { - /// "raw box" - pub fn rbox(&mut self, indent: isize, breaks: Breaks) -> BoxMarker { - self.scan_begin(BeginToken { indent: IndentStyle::Block { offset: indent }, breaks }) - } - - /// Inconsistent breaking box - pub fn ibox(&mut self, indent: isize) -> BoxMarker { - self.rbox(indent, Breaks::Inconsistent) - } - - /// Consistent breaking box - pub fn cbox(&mut self, indent: isize) -> BoxMarker { - self.rbox(indent, Breaks::Consistent) - } - - pub fn visual_align(&mut self) -> BoxMarker { - self.scan_begin(BeginToken { indent: IndentStyle::Visual, breaks: Breaks::Consistent }) - } - - pub fn break_offset(&mut self, n: usize, off: isize) { - self.scan_break(BreakToken { - offset: off, - blank_space: n as isize, - ..BreakToken::default() - }); - } - - pub fn end(&mut self, b: BoxMarker) { - self.scan_end(b) - } - - pub fn eof(mut self) -> String { - self.scan_eof(); - self.out - } - - pub fn word>>(&mut self, wrd: S) { - let string = wrd.into(); - self.scan_string(string) - } - - fn spaces(&mut self, n: usize) { - self.break_offset(n, 0) - } - - pub fn 
zerobreak(&mut self) { - self.spaces(0) - } - - pub fn space(&mut self) { - self.spaces(1) - } - - pub fn hardbreak(&mut self) { - self.spaces(SIZE_INFINITY as usize) - } - - pub fn is_beginning_of_line(&self) -> bool { - match self.last_token() { - Some(last_token) => last_token.is_hardbreak_tok(), - None => true, - } - } - - pub(crate) fn hardbreak_tok_offset(off: isize) -> Token { - Token::Break(BreakToken { - offset: off, - blank_space: SIZE_INFINITY, - ..BreakToken::default() - }) - } - - pub fn trailing_comma(&mut self) { - self.scan_break(BreakToken { pre_break: Some(','), ..BreakToken::default() }); - } - - pub fn trailing_comma_or_space(&mut self) { - self.scan_break(BreakToken { - blank_space: 1, - pre_break: Some(','), - ..BreakToken::default() - }); - } -} From b8ba4002f5d3d71be024e4d0ff39913e887ec510 Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Sun, 22 Feb 2026 17:32:24 +0100 Subject: [PATCH 60/64] c-variadic: handle c_int being i16 and c_double being f32 on avr --- compiler/rustc_codegen_llvm/src/intrinsic.rs | 68 +++++++++++-------- .../rustc_hir_typeck/src/fn_ctxt/checks.rs | 10 ++- library/core/src/ffi/va_list.rs | 48 ++++++++++++- .../c-link-to-rust-va-list-fn/checkrust.rs | 12 ++-- .../run-make/c-link-to-rust-va-list-fn/test.c | 2 +- 5 files changed, 99 insertions(+), 41 deletions(-) diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 0d3d682ece21f..9742f9fb3e42e 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -285,37 +285,47 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { } sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[], &[]), sym::va_arg => { - match result.layout.backend_repr { - BackendRepr::Scalar(scalar) => { - match scalar.primitive() { - Primitive::Int(..) 
=> {
-                                if self.cx().size_of(result.layout.ty).bytes() < 4 {
-                                    // `va_arg` should not be called on an integer type
-                                    // less than 4 bytes in length. If it is, promote
-                                    // the integer to an `i32` and truncate the result
-                                    // back to the smaller type.
-                                    let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
-                                    self.trunc(promoted_result, result.layout.llvm_type(self))
-                                } else {
-                                    emit_va_arg(self, args[0], result.layout.ty)
-                                }
-                            }
-                            Primitive::Float(Float::F16) => {
-                                bug!("the va_arg intrinsic does not work with `f16`")
-                            }
-                            Primitive::Float(Float::F64) | Primitive::Pointer(_) => {
-                                emit_va_arg(self, args[0], result.layout.ty)
-                            }
-                            // `va_arg` should never be used with the return type f32.
-                            Primitive::Float(Float::F32) => {
-                                bug!("the va_arg intrinsic does not work with `f32`")
-                            }
-                            Primitive::Float(Float::F128) => {
-                                bug!("the va_arg intrinsic does not work with `f128`")
-                            }
+                let BackendRepr::Scalar(scalar) = result.layout.backend_repr else {
+                    bug!("the va_arg intrinsic does not support non-scalar types")
+                };
+
+                match scalar.primitive() {
+                    Primitive::Pointer(_) => {
+                        // Pointers are always OK.
+                        emit_va_arg(self, args[0], result.layout.ty)
+                    }
+                    Primitive::Int(..) => {
+                        let int_width = self.cx().size_of(result.layout.ty).bits();
+                        let target_c_int_width = self.cx().sess().target.options.c_int_width;
+                        if int_width < u64::from(target_c_int_width) {
+                            // Smaller integer types are automatically promoted and `va_arg`
+                            // should not be called on them.
+                            bug!(
+                                "va_arg got i{} but needs at least c_int (an i{})",
+                                int_width,
+                                target_c_int_width
+                            );
+                        }
+ emit_va_arg(self, args[0], result.layout.ty) + } else { + bug!("the va_arg intrinsic does not support `f32` on this target") + } + } + Primitive::Float(Float::F64) => { + // 64-bit floats are always OK. + emit_va_arg(self, args[0], result.layout.ty) + } + Primitive::Float(Float::F128) => { + bug!("the va_arg intrinsic does not support `f128`") } - _ => bug!("the va_arg intrinsic does not work with non-scalar types"), } } diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs index bb31bcbf70f1b..966f020686319 100644 --- a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs +++ b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs @@ -499,10 +499,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ty::Float(ty::FloatTy::F32) => { variadic_error(tcx.sess, arg.span, arg_ty, "c_double"); } - ty::Int(ty::IntTy::I8 | ty::IntTy::I16) | ty::Bool => { + ty::Int(ty::IntTy::I8) | ty::Bool => { variadic_error(tcx.sess, arg.span, arg_ty, "c_int"); } - ty::Uint(ty::UintTy::U8 | ty::UintTy::U16) => { + ty::Uint(ty::UintTy::U8) => { + variadic_error(tcx.sess, arg.span, arg_ty, "c_uint"); + } + ty::Int(ty::IntTy::I16) if tcx.sess.target.options.c_int_width > 16 => { + variadic_error(tcx.sess, arg.span, arg_ty, "c_int"); + } + ty::Uint(ty::UintTy::U16) if tcx.sess.target.options.c_int_width > 16 => { variadic_error(tcx.sess, arg.span, arg_ty, "c_uint"); } ty::FnDef(..) 
=> { diff --git a/library/core/src/ffi/va_list.rs b/library/core/src/ffi/va_list.rs index f0f58a0f83430..45e25fabe3bd2 100644 --- a/library/core/src/ffi/va_list.rs +++ b/library/core/src/ffi/va_list.rs @@ -266,14 +266,17 @@ impl<'f> const Drop for VaList<'f> { mod sealed { pub trait Sealed {} + impl Sealed for i16 {} impl Sealed for i32 {} impl Sealed for i64 {} impl Sealed for isize {} + impl Sealed for u16 {} impl Sealed for u32 {} impl Sealed for u64 {} impl Sealed for usize {} + impl Sealed for f32 {} impl Sealed for f64 {} impl Sealed for *mut T {} @@ -299,22 +302,61 @@ mod sealed { // to accept unsupported types in the meantime. pub unsafe trait VaArgSafe: sealed::Sealed {} -// i8 and i16 are implicitly promoted to c_int in C, and cannot implement `VaArgSafe`. +crate::cfg_select! { + any(target_arch = "avr", target_arch = "msp430") => { + // c_int/c_uint are i16/u16 on these targets. + // + // - i8 is implicitly promoted to c_int in C, and cannot implement `VaArgSafe`. + // - u8 is implicitly promoted to c_uint in C, and cannot implement `VaArgSafe`. + unsafe impl VaArgSafe for i16 {} + unsafe impl VaArgSafe for u16 {} + } + _ => { + // c_int/c_uint are i32/u32 on this target. + // + // - i8 and i16 are implicitly promoted to c_int in C, and cannot implement `VaArgSafe`. + // - u8 and u16 are implicitly promoted to c_uint in C, and cannot implement `VaArgSafe`. + } +} + +crate::cfg_select! { + target_arch = "avr" => { + // c_double is f32 on this target. + unsafe impl VaArgSafe for f32 {} + } + _ => { + // c_double is f64 on this target. + // + // - f32 is implicitly promoted to c_double in C, and cannot implement `VaArgSafe`. + } +} + unsafe impl VaArgSafe for i32 {} unsafe impl VaArgSafe for i64 {} unsafe impl VaArgSafe for isize {} -// u8 and u16 are implicitly promoted to c_int in C, and cannot implement `VaArgSafe`. 
unsafe impl VaArgSafe for u32 {} unsafe impl VaArgSafe for u64 {} unsafe impl VaArgSafe for usize {} -// f32 is implicitly promoted to c_double in C, and cannot implement `VaArgSafe`. unsafe impl VaArgSafe for f64 {} unsafe impl VaArgSafe for *mut T {} unsafe impl VaArgSafe for *const T {} +// Check that relevant `core::ffi` types implement `VaArgSafe`. +const _: () = { + const fn va_arg_safe_check() {} + + va_arg_safe_check::(); + va_arg_safe_check::(); + va_arg_safe_check::(); + va_arg_safe_check::(); + va_arg_safe_check::(); + va_arg_safe_check::(); + va_arg_safe_check::(); +}; + impl<'f> VaList<'f> { /// Read an argument from the variable argument list, and advance to the next argument. /// diff --git a/tests/run-make/c-link-to-rust-va-list-fn/checkrust.rs b/tests/run-make/c-link-to-rust-va-list-fn/checkrust.rs index c522ac46d918e..109fbb1c62036 100644 --- a/tests/run-make/c-link-to-rust-va-list-fn/checkrust.rs +++ b/tests/run-make/c-link-to-rust-va-list-fn/checkrust.rs @@ -30,17 +30,17 @@ pub unsafe extern "C" fn check_list_1(mut ap: VaList) -> usize { continue_if!(ap.arg::() == '4' as c_int); continue_if!(ap.arg::() == ';' as c_int); continue_if!(ap.arg::() == 0x32); - continue_if!(ap.arg::() == 0x10000001); + continue_if!(ap.arg::() == 0x10000001); continue_if!(compare_c_str(ap.arg::<*const c_char>(), c"Valid!")); 0 } #[unsafe(no_mangle)] pub unsafe extern "C" fn check_list_2(mut ap: VaList) -> usize { - continue_if!(ap.arg::() == 3.14f64); + continue_if!(ap.arg::() == 3.14); continue_if!(ap.arg::() == 12); continue_if!(ap.arg::() == 'a' as c_int); - continue_if!(ap.arg::() == 6.28f64); + continue_if!(ap.arg::() == 6.28); continue_if!(compare_c_str(ap.arg::<*const c_char>(), c"Hello")); continue_if!(ap.arg::() == 42); continue_if!(compare_c_str(ap.arg::<*const c_char>(), c"World")); @@ -49,7 +49,7 @@ pub unsafe extern "C" fn check_list_2(mut ap: VaList) -> usize { #[unsafe(no_mangle)] pub unsafe extern "C" fn check_list_copy_0(mut ap: VaList) -> usize { - 
continue_if!(ap.arg::() == 6.28f64); + continue_if!(ap.arg::() == 6.28); continue_if!(ap.arg::() == 16); continue_if!(ap.arg::() == 'A' as c_int); continue_if!(compare_c_str(ap.arg::<*const c_char>(), c"Skip Me!")); @@ -66,7 +66,7 @@ pub unsafe extern "C" fn check_varargs_0(_: c_int, mut ap: ...) -> usize { #[unsafe(no_mangle)] pub unsafe extern "C" fn check_varargs_1(_: c_int, mut ap: ...) -> usize { - continue_if!(ap.arg::() == 3.14f64); + continue_if!(ap.arg::() == 3.14); continue_if!(ap.arg::() == 12); continue_if!(ap.arg::() == 'A' as c_int); continue_if!(ap.arg::() == 1); @@ -156,7 +156,7 @@ extern "C" fn run_test_variadic() -> usize { #[unsafe(no_mangle)] extern "C" fn run_test_va_list_by_value() -> usize { - unsafe extern "C" fn helper(mut ap: ...) -> usize { + unsafe extern "C" fn helper(ap: ...) -> usize { unsafe { test_va_list_by_value(ap) } } diff --git a/tests/run-make/c-link-to-rust-va-list-fn/test.c b/tests/run-make/c-link-to-rust-va-list-fn/test.c index 2bb93c0b5d0ef..b368302326c71 100644 --- a/tests/run-make/c-link-to-rust-va-list-fn/test.c +++ b/tests/run-make/c-link-to-rust-va-list-fn/test.c @@ -32,7 +32,7 @@ int test_rust(size_t (*fn)(va_list), ...) 
{ int main(int argc, char* argv[]) { assert(test_rust(check_list_0, 0x01LL, 0x02, 0x03LL) == 0); - assert(test_rust(check_list_1, -1, 'A', '4', ';', 0x32, 0x10000001, "Valid!") == 0); + assert(test_rust(check_list_1, -1, 'A', '4', ';', 0x32, (int32_t)0x10000001, "Valid!") == 0); assert(test_rust(check_list_2, 3.14, 12l, 'a', 6.28, "Hello", 42, "World") == 0); From a875e140b6ff6735f2b93186c1d80c1d4165ae38 Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Sun, 22 Feb 2026 19:53:54 +0100 Subject: [PATCH 61/64] c-variadic: make `VaArgSafe` a lang item so that we can check whether a type implements the trait --- compiler/rustc_hir/src/lang_items.rs | 1 + .../rustc_hir_typeck/src/fn_ctxt/checks.rs | 24 ++++++++++++------- compiler/rustc_span/src/symbol.rs | 1 + library/core/src/ffi/va_list.rs | 1 + 4 files changed, 19 insertions(+), 8 deletions(-) diff --git a/compiler/rustc_hir/src/lang_items.rs b/compiler/rustc_hir/src/lang_items.rs index c144f0b7dbc5b..91a5415039ac0 100644 --- a/compiler/rustc_hir/src/lang_items.rs +++ b/compiler/rustc_hir/src/lang_items.rs @@ -221,6 +221,7 @@ language_item_table! { UnsafeCell, sym::unsafe_cell, unsafe_cell_type, Target::Struct, GenericRequirement::None; UnsafePinned, sym::unsafe_pinned, unsafe_pinned_type, Target::Struct, GenericRequirement::None; + VaArgSafe, sym::va_arg_safe, va_arg_safe, Target::Trait, GenericRequirement::None; VaList, sym::va_list, va_list, Target::Struct, GenericRequirement::None; Deref, sym::deref, deref_trait, Target::Trait, GenericRequirement::Exact(0); diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs index 966f020686319..b084705425a49 100644 --- a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs +++ b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs @@ -494,21 +494,29 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // There are a few types which get autopromoted when passed via varargs // in C but we just error out instead and require explicit casts. 
+        //
+        // We use implementations of VaArgSafe as the source of truth. On some embedded
+        // targets, c_double is f32 and c_int/c_uint are i16/u16, and these types implement
+        // VaArgSafe there. On all other targets, these types do not implement VaArgSafe.
+        //
+        // cfg(bootstrap): change the if let to an unwrap.
         let arg_ty = self.structurally_resolve_type(arg.span, arg_ty);
+
+        if let Some(trait_def_id) = tcx.lang_items().va_arg_safe()
+            && self
+                .type_implements_trait(trait_def_id, [arg_ty], self.param_env)
+                .must_apply_modulo_regions()
+        {
+            continue;
+        }
+
         match arg_ty.kind() {
             ty::Float(ty::FloatTy::F32) => {
                 variadic_error(tcx.sess, arg.span, arg_ty, "c_double");
             }
-            ty::Int(ty::IntTy::I8) | ty::Bool => {
-                variadic_error(tcx.sess, arg.span, arg_ty, "c_int");
-            }
-            ty::Uint(ty::UintTy::U8) => {
-                variadic_error(tcx.sess, arg.span, arg_ty, "c_uint");
-            }
-            ty::Int(ty::IntTy::I16) if tcx.sess.target.options.c_int_width > 16 => {
+            ty::Int(ty::IntTy::I8 | ty::IntTy::I16) | ty::Bool => {
                 variadic_error(tcx.sess, arg.span, arg_ty, "c_int");
             }
-            ty::Uint(ty::UintTy::U16) if tcx.sess.target.options.c_int_width > 16 => {
+            ty::Uint(ty::UintTy::U8 | ty::UintTy::U16) => {
                 variadic_error(tcx.sess, arg.span, arg_ty, "c_uint");
             }
             ty::FnDef(..) => {
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
index 80d1c91c81ddc..718d9e22626eb 100644
--- a/compiler/rustc_span/src/symbol.rs
+++ b/compiler/rustc_span/src/symbol.rs
@@ -2209,6 +2209,7 @@ symbols! {
         v1,
         v8plus,
         va_arg,
+        va_arg_safe,
         va_copy,
         va_end,
         va_list,
diff --git a/library/core/src/ffi/va_list.rs b/library/core/src/ffi/va_list.rs
index 45e25fabe3bd2..034e4ad728b8a 100644
--- a/library/core/src/ffi/va_list.rs
+++ b/library/core/src/ffi/va_list.rs
@@ -300,6 +300,7 @@ mod sealed {
 // We may unseal this trait in the future, but currently our `va_arg` implementations don't support
 // types with an alignment larger than 8, or with a non-scalar layout.
Inline assembly can be used // to accept unsupported types in the meantime. +#[lang = "va_arg_safe"] pub unsafe trait VaArgSafe: sealed::Sealed {} crate::cfg_select! { From 78a465a86d7c0239f8190830b1288489caac3a8d Mon Sep 17 00:00:00 2001 From: cijiugechu Date: Thu, 16 Apr 2026 18:09:36 +0800 Subject: [PATCH 62/64] Use `box_new` diagnostic item for Box::new suggestions --- compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs index a2f4c57bd442c..b5d138f183b99 100644 --- a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs +++ b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs @@ -3107,14 +3107,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { { let deref_kind = if checked_ty.is_box() { // detect Box::new(..) - // FIXME: use `box_new` diagnostic item instead? if let ExprKind::Call(box_new, [_]) = expr.kind && let ExprKind::Path(qpath) = &box_new.kind && let Res::Def(DefKind::AssocFn, fn_id) = self.typeck_results.borrow().qpath_res(qpath, box_new.hir_id) - && let Some(impl_id) = self.tcx.inherent_impl_of_assoc(fn_id) - && self.tcx.type_of(impl_id).skip_binder().is_box() - && self.tcx.item_name(fn_id) == sym::new + && self.tcx.is_diagnostic_item(sym::box_new, fn_id) { let l_paren = self.tcx.sess.source_map().next_point(box_new.span); let r_paren = self.tcx.sess.source_map().end_point(expr.span); From d0f5b5caa865445bef2f59d1dd0f339a0750434f Mon Sep 17 00:00:00 2001 From: Daria Sukhonina Date: Thu, 16 Apr 2026 15:19:42 +0300 Subject: [PATCH 63/64] Replace redundant unwrap with get_or_insert_with --- compiler/rustc_middle/src/query/job.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/compiler/rustc_middle/src/query/job.rs b/compiler/rustc_middle/src/query/job.rs index 24c4daf9855d2..8c78bf24287e0 100644 --- a/compiler/rustc_middle/src/query/job.rs +++ 
b/compiler/rustc_middle/src/query/job.rs @@ -36,10 +36,7 @@ impl<'tcx> QueryJob<'tcx> { } pub fn latch(&mut self) -> QueryLatch<'tcx> { - if self.latch.is_none() { - self.latch = Some(QueryLatch::new()); - } - self.latch.as_ref().unwrap().clone() + self.latch.get_or_insert_with(QueryLatch::new).clone() } /// Signals to waiters that the query is complete. From 4645f036d054cbc083194deb8d69c57cd3cd8317 Mon Sep 17 00:00:00 2001 From: mejrs <59372212+mejrs@users.noreply.github.com> Date: Thu, 16 Apr 2026 15:48:06 +0200 Subject: [PATCH 64/64] triagebot: notify on diagnostic attribute changes --- triagebot.toml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/triagebot.toml b/triagebot.toml index 7708bdbceffcb..dd5bc55cfe784 100644 --- a/triagebot.toml +++ b/triagebot.toml @@ -1451,6 +1451,12 @@ code; adding it needs t-lang approval. """ cc = ["@rust-lang/wg-const-eval"] +[mentions."compiler/rustc_attr_parsing/src/attributes/diagnostic"] +message = "Some changes occurred to diagnostic attributes." +cc = ["@mejrs"] +[mentions."compiler/rustc_hir/src/attrs/diagnostic.rs"] +message = "Some changes occurred to diagnostic attributes." +cc = ["@mejrs"] # ------------------------------------------------------------------------------ # PR assignments